#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
static inline void
{
uint32_t i;
for (i = 0; i != n; i++)
}
/*
 * Enqueue a bulk of crypto-ops to the SA's cryptodev queue-pair.
 * Ops that cannot be accepted by the PMD are freed (their mbufs dropped).
 * NOTE(review): the chunk-size computation and both
 * rte_cryptodev_enqueue_burst() calls were missing from this chunk
 * (line "cqp->buf, len);" was a dangling call tail); restored from the
 * upstream DPDK ipsec-secgw example -- confirm against the repo.
 */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[],
	uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * If nothing is buffered and the burst is large enough,
	 * bypass the buffer and enqueue straight to the PMD.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		/* drop ops the PMD did not accept */
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		/* how many ops fit into the buffer this round */
		n = RTE_MIN(RTE_DIM(cqp->buf) - len, num - k);

		/* stage crypto-ops into the queue buffer */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		len += n;
		k += n;

		/* buffer full: flush it to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
				cqp->buf, len);
			cqp->in_flight += n;
			/* drop ops the PMD did not accept */
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}
	} while (k != num);

	cqp->len = len;
}
static inline int
{
if (ss->
crypto.
ses == NULL)
return -ENOENT;
if (ss->
security.ses == NULL)
return -ENOENT;
} else
RTE_ASSERT(0);
return 0;
}
static uint32_t
sa_group(
void *sa_ptr[],
struct rte_mbuf *pkts[],
{
uint32_t i, n, spi;
void *sa;
void * const nosa = &spi;
sa = nosa;
for (i = 0, n = 0; i != num; i++) {
if (sa != sa_ptr[i]) {
grp[n].
cnt = pkts + i - grp[n].
m;
n += (sa != nosa);
grp[n].
id.
ptr = sa_ptr[i];
sa = sa_ptr[i];
}
}
if (sa != nosa) {
grp[n].
cnt = pkts + i - grp[n].
m;
n++;
}
return n;
}
static inline void
copy_to_trf(
struct ipsec_traffic *trf, uint64_t satp,
struct rte_mbuf *mb[],
uint32_t num)
{
uint32_t j, ofs, s;
struct traffic_type *out;
if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
out = &trf->ip4;
} else {
out = &trf->ip6;
ofs =
offsetof(
struct ip6_hdr, ip6_nxt);
}
} else if (SATP_OUT_IPV4(satp)) {
out = &trf->ip4;
} else {
out = &trf->ip6;
ofs =
offsetof(
struct ip6_hdr, ip6_nxt);
}
for (j = 0, s = out->num; j != num; j++) {
void *, ofs);
out->pkts[s + j] = mb[j];
}
out->num += num;
}
/*
 * Prepare crypto-ops for a group of packets belonging to one SA and
 * enqueue them to the SA's cryptodev queue-pair for this lcore.
 * Returns the number of packets successfully prepared.
 * NOTE(review): the "ips"/"m" parameters, the cop[] array declaration
 * and the rte_ipsec_pkt_crypto_prepare() call were missing from this
 * chunk (k was used uninitialized); restored from the upstream DPDK
 * ipsec-secgw example -- confirm against the repo.
 */
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips, struct rte_mbuf **m,
	unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = sa->cqp[ctx->lcore_id];

	/* each mbuf carries its own crypto-op in its private area */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/* only needed to satisfy inbound_sa_check() */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}
static uint32_t
struct ipsec_traffic *trf,
struct rte_mbuf *mb[], uint32_t cnt)
{
uint64_t satp;
uint32_t k;
prep_process_group(sa, mb, cnt);
copy_to_trf(trf, satp, mb, k);
return k;
}
static uint32_t
struct ipsec_traffic *trf,
struct rte_mbuf *mb[], uint32_t cnt)
{
uint64_t satp;
uint32_t k;
prep_process_group(sa, mb, cnt);
k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
copy_to_trf(trf, satp, mb, k);
return k;
}
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
uint32_t i, k, n;
struct ipsec_sa *sa;
n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
for (i = 0; i != n; i++) {
pg = grp + i;
sa = ipsec_mask_saptr(pg->
id.ptr);
if (sa != NULL)
ips = (pg->
id.
val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
ipsec_get_fallback_session(sa) :
ipsec_get_primary_session(sa);
if (sa == NULL ||
unlikely(check_ipsec_session(ips) != 0))
k = 0;
else {
k = ipsec_prepare_crypto_group(ctx, sa, ips,
break;
k = ipsec_process_inline_group(ips, sa,
break;
k = ipsec_process_cpu_group(ips, sa,
break;
default:
k = 0;
}
}
free_pkts(pg->
m + k, pg->
cnt - k);
}
}
/*
 * Dequeue up to num completed crypto-ops from one cryptodev queue-pair.
 * Returns the number of ops dequeued (0 when nothing is in flight).
 * NOTE(review): the rte_cryptodev_dequeue_burst() call was missing from
 * this chunk (n was used uninitialized); restored from the upstream
 * DPDK ipsec-secgw example -- confirm against the repo.
 */
static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	/* nothing was enqueued: avoid touching the PMD */
	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}
/*
 * Dequeue up to num crypto-ops across all queue-pairs of the context,
 * starting from the queue after the last one serviced (round-robin).
 * Updates ctx->last_qp and returns the total number of ops dequeued.
 */
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t qid, got;

	got = 0;

	/* scan from the saved position up to the last queue-pair */
	for (qid = ctx->last_qp; got != num && qid != ctx->nb_qps; qid++)
		got += cqp_dequeue(ctx->tbl + qid, cop + got, num - got);

	/* wrap around and scan from the beginning */
	for (qid = 0; got != num && qid != ctx->last_qp; qid++)
		got += cqp_dequeue(ctx->tbl + qid, cop + got, num - got);

	/* remember where to resume on the next call */
	ctx->last_qp = qid;
	return got;
}
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
uint64_t satp;
uint32_t i, k, n, ng;
struct traffic_type *out;
trf->ip4.num = 0;
trf->ip6.num = 0;
out = &trf->ipsec;
n = ctx_dequeue(ctx, cop,
RTE_DIM(cop));
if (n == 0)
return;
(uintptr_t)cop, out->pkts, grp, n);
for (i = 0; i != ng; i++) {
pg = grp + i;
copy_to_trf(trf, satp, pg->
m, k);
free_pkts(pg->
m + k, pg->
cnt - k);
}
RTE_VERIFY(n == 0);
}
#define offsetof(TYPE, MEMBER)
static uint16_t rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
static uint16_t rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
static uint16_t rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
static uint16_t rte_ipsec_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num)
static uint16_t rte_ipsec_pkt_crypto_group(const struct rte_crypto_op *cop[], struct rte_mbuf *mb[], struct rte_ipsec_group grp[], uint16_t num)
uint64_t rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
static void rte_pktmbuf_free(struct rte_mbuf *m)
#define rte_pktmbuf_mtod_offset(m, t, o)
@ RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO
@ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
@ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
@ RTE_SECURITY_ACTION_TYPE_NONE
@ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
union rte_ipsec_group::@210 id
enum rte_security_session_action_type type