#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
#include "sad.h"
#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
#endif
struct port_drv_mode_data {
	void *sess;
	struct rte_security_ctx *ctx;
};
typedef void (*ipsec_worker_fn_t)(void);
int ip_reassembly_dynfield_offset = -1;
uint64_t ip_reassembly_dynflag;
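/*
 * Helper to classify an incoming mbuf using its packet_type. It also returns,
 * via 'nlp', a pointer to the next-protocol field of the L3 header so that the
 * SPD (ACL) lookup can start from it.
 */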
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	return PKT_TYPE_INVALID;
}
static inline void
update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
{
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}
static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	/* Vector attributes stay valid only while all packets share one port */
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}
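/*
 * Build the per-port table of outbound inline sessions used by driver mode,
 * so a security session can be attached to packets using only the Tx port id.
 */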
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
			RTE_LOG(ERR, IPSEC,
				"Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id >= than table size %d, %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}
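/* Single packet SPD (ACL) lookup: returns 0 on discard, 1 with the SA index */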
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}
static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}
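/*
 * Bulk SPD lookup for inline-processed inbound packets: discard or bypass the
 * packets, or verify that the SA stored in the security dynfield matches the
 * SPD result (SPI check) before forwarding.
 */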
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}
static inline void
ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ipv4;

	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
	pkt->l3_len = ipv4->ihl * 4;
}

static inline int
ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv6_hdr *ipv6;
	size_t l3_len, ext_len;
	uint32_t l3_type;
	int next_proto;
	uint8_t *p;

	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
	l3_len = sizeof(struct rte_ipv6_hdr);
	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
	    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		next_proto = ipv6->proto;
		while (next_proto != IPPROTO_ESP &&
		       l3_len < pkt->data_len &&
		       (next_proto = rte_ipv6_get_next_ext(p + l3_len,
					next_proto, &ext_len)) >= 0)
			l3_len += ext_len;

		/* Drop pkt when IPv6 header exceeds first segment length */
		if (unlikely(l3_len > pkt->data_len))
			return -EINVAL;
	}
	pkt->l3_len = l3_len;

	return 0;
}
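/*
 * LPM route lookup helpers: return the egress port for the packet's
 * destination address, or RTE_MAX_ETHPORTS when no route exists.
 */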
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}
static inline uint16_t
get_route(
struct rte_mbuf *pkt,
struct route_table *rt,
enum pkt_type type)
{
if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
return route4_pkt(pkt, rt->rt4_ctx);
else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
return route6_pkt(pkt, rt->rt6_ctx);
return RTE_MAX_ETHPORTS;
}
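/*
 * Helpers to prepare crypto operations before posting packets to the event
 * crypto adapter: reset the op and attach the security session to it.
 */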
static inline void
crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
		struct rte_crypto_op *cop[], uint16_t num)
{
	struct rte_crypto_sym_op *sop;
	uint32_t i;
	const struct rte_crypto_op unproc_cop = {
		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
	};

	for (i = 0; i != num; i++) {
		cop[i]->raw = unproc_cop.raw;
		sop = cop[i]->sym;
		sop->m_src = mb[i];
		sop->m_dst = NULL;
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

static inline void
crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess,
		     struct rte_event *ev)
{
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cop;

	priv = get_priv(pkt);
	cop = &priv->cop;
	crypto_op_reset(sess, &pkt, &cop, 1);

	/* Update event_ptr with rte_crypto_op */
	ev->event = 0;
	ev->event_ptr = cop;
}
static inline void
free_pkts_from_events(struct rte_event events[], uint16_t count)
{
	struct rte_crypto_op *cop;
	int i;

	for (i = 0; i < count; i++) {
		cop = events[i].event_ptr;
		free_pkts(&cop->sym->m_src, 1);
	}
}

static inline int
event_crypto_enqueue(struct rte_mbuf *pkt,
		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
{
	struct rte_ipsec_session *sess;
	struct rte_event ev;
	int ret;

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	crypto_prepare_event(pkt, sess, &ev);

	/* Enqueue event to crypto adapter */
	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
			ev_link->event_port_id, &ev, 1);
	if (unlikely(ret != 1)) {
		/* pkt will be freed by the caller */
		RTE_LOG_DP(DEBUG, IPSEC,
			"Cannot enqueue event: %i (errno: %i)\n",
			ret, rte_errno);
		return rte_errno;
	}

	return 0;
}
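/*
 * App mode inbound handler for a single event: plain packets go through
 * SPD/SA validation and routing, ESP packets are posted to the crypto adapter.
 */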
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		const struct eh_event_link_info *ev_link,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_ether_hdr *ethhdr;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;
	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return PKT_DROPPED;
	}

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
			goto drop_pkt_and_exit;
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP,
			   "Unsupported packet type = %d\n",
			   type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	update_mac_addrs(ethhdr, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	free_pkts(&pkt, 1);
	return PKT_DROPPED;
}
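/*
 * App mode outbound handler for a single event: consult the SPD and either
 * bypass, apply inline protocol Tx offload, or post to the crypto adapter.
 */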
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		const struct eh_event_link_info *ev_link,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC,
			"Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Determine protocol type */
	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		goto lookaside;

	rte_security_set_pkt_metadata(sess->security.ctx,
			sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt need to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	update_mac_addrs(ethhdr, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

lookaside:
	/* Prepare pkt - advance start to L3 */
	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);

	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
		return PKT_POSTED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC,
		"Outbound packet dropped\n");
	free_pkts(&pkt, 1);
	return PKT_DROPPED;
}
static inline int
ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
		       struct ipsec_traffic *t)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	uint32_t i, j = 0;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	return j;
}
static inline int
ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
		struct route_table *rt,
		struct ipsec_traffic *t,
		const struct eh_event_link_info *ev_link)
{
	uint32_t ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		pkt = t->ipsec.pkts[i];
		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
		if (unlikely(sa == NULL)) {
			free_pkts(&pkt, 1);
			continue;
		}
		sess = ipsec_get_primary_session(sa);
		crypto_prepare_event(pkt, sess, &events[ev_len]);
		ev_len++;
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC,
				"Cannot enqueue events: %i (errno: %i)\n",
				ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
		}
	}

	return j;
}
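/*
 * Route the plain packets of an outbound vector and process the protected
 * ones: lookaside sessions are queued to the crypto adapter while inline
 * sessions are tagged for Tx security offload and routed directly.
 */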
static inline int
ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec,
		struct route_table *rt,
		struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
		const struct eh_event_link_info *ev_link)
{
	uint32_t sa_idx, ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Handle IPsec packets.
	 * For lookaside IPsec packets, submit to cryptodev queue.
	 * For inline IPsec packets, route the packet.
	 */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
			free_pkts(&pkt, 1);
			continue;
		}
		/* Else the packet has to be protected */
		sa = &(sa_ctx->sa[sa_idx]);
		/* Get IPsec session */
		sess = ipsec_get_primary_session(sa);
		switch (sess->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
			crypto_prepare_event(pkt, sess, &events[ev_len]);
			ev_len++;
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			rte_security_set_pkt_metadata(sess->security.ctx,
					sess->security.ses, pkt, NULL);
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;

			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
			break;
		default:
			RTE_LOG(ERR, IPSEC,
				"SA type not supported\n");
			free_pkts(&pkt, 1);
			break;
		}
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC,
				"Cannot enqueue events: %i (errno: %i)\n",
				ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
		}
	}

	return j;
}
static inline void
classify_pkt(
struct rte_mbuf *pkt,
struct ipsec_traffic *t)
{
enum pkt_type type;
uint8_t *nlp;
type = process_ipsec_get_pkt_type(pkt, &nlp);
switch (type) {
case PKT_TYPE_PLAIN_IPV4:
t->ip4.data[t->ip4.num] = nlp;
t->ip4.pkts[(t->ip4.num)++] = pkt;
break;
case PKT_TYPE_PLAIN_IPV6:
t->ip6.data[t->ip6.num] = nlp;
t->ip6.pkts[(t->ip6.num)++] = pkt;
break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (ipv6_pkt_l3_len_set(pkt) != 0) {
free_pkts(&pkt, 1);
return;
}
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
break;
default:
RTE_LOG_DP(DEBUG, IPSEC_ESP,
"Unsupported packet type = %d\n",
type);
free_pkts(&pkt, 1);
break;
}
}
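/*
 * Vector variants of the event handlers: classify every mbuf of the event
 * vector, then run bulk SPD/SAD lookups before routing or crypto submission.
 */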
static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event_vector *vec,
		const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];
		if (is_ip_reassembly_incomplete(pkt) > 0) {
			free_reassembly_fail_pkt(pkt);
			continue;
		}

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}
check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
if (t.ipsec.num != 0)
sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);
return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
}
static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event_vector *vec,
		const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
classify_pkt(pkt, &t);
}
check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
}
static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
		struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}
static void
ipsec_event_vector_free(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;

	rte_pktmbuf_free_bulk(vec->mbufs, vec->nb_elem);
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}
static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	core_stats_update_rx(vec->nb_elem);

	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec, links);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec, links);

	if (likely(ret > 0)) {
		core_stats_update_tx(vec->nb_elem);
		vec->nb_elem = ret;
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}
static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
		struct rte_event *ev,
		struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	uint16_t ret;

	pkt = vec->mbufs[0];

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (vec->nb_elem > 0) {
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}
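/*
 * Post-process a packet returned by the cryptodev: rebuild the Ethernet
 * header, route the resulting IP packet and prepare it for Tx.
 */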
static inline int
ipsec_ev_cryptodev_process_one_pkt(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
{
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id;
	struct ip *ip;

	/* If operation was not successful, free the packet */
	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		RTE_LOG_DP(INFO, IPSEC,
			"Crypto operation failed\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	/* Prepend Ether layer */
	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt,
			RTE_ETHER_HDR_LEN);

	/* Route pkt and update required fields */
	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
	} else {
		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
	}

	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		RTE_LOG_DP(DEBUG, IPSEC,
			"Cannot route processed packet\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	/* Update Ether with port's MAC addresses */
	update_mac_addrs(ethhdr, port_id);

	ipsec_event_pre_forward(pkt, port_id);
	return 0;
}
static inline void
ipsec_ev_cryptodev_vector_process(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct eh_event_link_info *links,
		struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	const uint16_t nb_events = 1;
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;
	uint16_t enqueued;
	int i, n = 0;

	ev_vector_attr_init(vec);
	/* Transform cop vec into pkt vec */
	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt data */
		cop = vec->ptrs[i];
		pkt = cop->sym->m_src;
		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
			continue;

		vec->mbufs[n++] = pkt;
		ev_vector_attr_update(vec, pkt);
	}

	if (n == 0) {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
		return;
	}

	vec->nb_elem = n;
	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
			links[0].event_port_id, ev, nb_events, 0);
	if (enqueued != nb_events) {
		RTE_LOG_DP(DEBUG, IPSEC,
			"Failed to enqueue to tx, ret = %u,"
			" errno = %i\n", enqueued, rte_errno);
		free_pkts(vec->mbufs, vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else {
		core_stats_update_tx(n);
	}
}
static inline int
ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			   struct rte_event *ev)
{
	struct rte_crypto_op *cop = ev->event_ptr;
	struct rte_mbuf *pkt = cop->sym->m_src;

	/* Push pkt to event so that it can be forwarded after processing */
	ev->mbuf = pkt;
	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
		return PKT_DROPPED;

	return PKT_FORWARDED;
}
static void
ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		ipsec_event_vector_free(&ev);
	else
		rte_pktmbuf_free(ev.mbuf);
}
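/*
 * Event mode exposes various operating modes depending on the capabilities of
 * the event device and the operating mode selected.
 */

/* Workers registered */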
#define IPSEC_EVENTMODE_WORKERS 2
static void
ipsec_ip_reassembly_dyn_offset_get(void)
{
	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	if (ip_reassembly_dynflag == 0) {
		int ip_reassembly_dynflag_offset;

		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset >= 0)
			ip_reassembly_dynflag =
				RTE_BIT64(ip_reassembly_dynflag_offset);
	}
}
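/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */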
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0, nb_tx;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	memset(&data, 0, sizeof(struct port_drv_mode_data));

	/* Get core and socket ID */
	lcore_id = rte_lcore_id();
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC,
		" -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC,
				"Invalid event type %u",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {
			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since Tx internal port is available, events can be
		 * directly enqueued to the adapter.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
				links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}
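/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */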
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0, nb_tx;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core and socket ID */
	lcore_id = rte_lcore_id();
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table and SP/SA contexts */
lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
lconf.inbound.lcore_id = lcore_id;
lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
lconf.outbound.ipv4_offloads = tx_offloads.ipv4_offloads;
lconf.outbound.ipv6_offloads = tx_offloads.ipv6_offloads;
lconf.outbound.lcore_id = lcore_id;
"Launching event mode worker (non-burst - Tx internal port - "
"app mode) on lcore %d\n", lcore_id);
ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (ret != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, ret);
return;
}
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
}
RTE_LOG(INFO, IPSEC,
" -- lcoreid=%u event_port_id=%u\n", lcore_id,
links[0].event_port_id);
ipsec_ip_reassembly_dyn_offset_get();
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			core_stats_update_rx(1);
			if (is_unprotected_port(ev.mbuf->port))
				ret = process_ipsec_ev_inbound(&lconf.inbound,
						&lconf.rt, links, &ev);
			else
				ret = process_ipsec_ev_outbound(&lconf.outbound,
						&lconf.rt, links, &ev);
			if (ret != 1)
				/* The pkt has been dropped or posted */
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
			if (unlikely(ret != PKT_FORWARDED))
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
			continue;
		default:
			RTE_LOG(ERR, IPSEC,
				"Invalid event type %u",
				ev.event_type);
			continue;
		}

		core_stats_update_tx(1);

		/*
		 * Since Tx internal port is available, events can be
		 * directly enqueued to the adapter.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
				links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
struct eh_app_worker_params *wrkr;
uint8_t nb_wrkr_param = 0;
wrkr = wrkrs;
wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
wrkr++;
nb_wrkr_param++;
wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
nb_wrkr_param++;
return nb_wrkr_param;
}
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
{{{0} }, NULL } };
uint8_t nb_wrkr_param;
nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
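/*
 * Outbound SPD lookup for inline-protocol poll mode: sort packets into the
 * matching/mismatching traffic types and process each group of consecutive
 * packets that share the same SA through the IPsec library.
 */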
static void
outb_inl_pro_spd_process(struct sp_ctx *sp,
			 struct sa_ctx *sa_ctx,
			 struct traffic_type *ip,
			 struct traffic_type *match,
			 struct traffic_type *mismatch,
			 bool match_flag,
			 struct ipsec_spd_stats *stats)
{
	uint32_t prev_sa_idx = UINT32_MAX;
	struct rte_mbuf *ipsec[MAX_PKT_BURST];
	struct rte_ipsec_session *ips;
	uint32_t i, j, j_mis, sa_idx;
	struct ipsec_sa *sa = NULL;
	uint32_t ipsec_num = 0;
	struct rte_mbuf *m;
	uint64_t satp;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = match->num;
	j_mis = mismatch->num;

	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);

			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			match->pkts[j++] = m;

			stats->bypass++;
		} else {
			if (prev_sa_idx == UINT32_MAX) {
				prev_sa_idx = sa_idx;
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
			}

			if (sa_idx != prev_sa_idx) {
				prep_process_group(sa, ipsec, ipsec_num);

				/* Prepare packets for outbound */
				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

				/* Copy to current tr or a different tr */
				if (SATP_OUT_IPV4(satp) == match_flag) {
					memcpy(&match->pkts[j], ipsec,
					       ipsec_num * sizeof(void *));
					j += ipsec_num;
				} else {
					memcpy(&mismatch->pkts[j_mis], ipsec,
					       ipsec_num * sizeof(void *));
					j_mis += ipsec_num;
				}

				/* Update to new SA */
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
				prev_sa_idx = sa_idx;
				ipsec_num = 0;
			}

			ipsec[ipsec_num++] = m;
			stats->protect++;
		}
	}

	if (ipsec_num) {
		prep_process_group(sa, ipsec, ipsec_num);

		/* Prepare packets for outbound */
		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

		/* Copy to current tr or a different tr */
		if (SATP_OUT_IPV4(satp) == match_flag) {
			memcpy(&match->pkts[j], ipsec,
			       ipsec_num * sizeof(void *));
			j += ipsec_num;
		} else {
			memcpy(&mismatch->pkts[j_mis], ipsec,
			       ipsec_num * sizeof(void *));
			j_mis += ipsec_num;
		}
	}
	match->num = j;
	mismatch->num = j_mis;
}
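/*
 * Poll mode worker for the case where all SAs use inline protocol offload:
 * inbound traffic is already decrypted by hardware, outbound traffic is
 * matched against the SPD and prepared for inline protocol processing.
 */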
void
ipsec_poll_mode_wrkr_inl_pr(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct ipsec_core_statistics *stats;
	struct rt_ctx *rt4_ctx, *rt6_ctx;
	struct sa_ctx *sa_in, *sa_out;
	struct traffic_type ip4, ip6;
	struct lcore_rx_queue *rxql;
	struct rte_mbuf **v4, **v6;
	struct ipsec_traffic trf;
	struct lcore_conf *qconf;
	uint16_t v4_num, v6_num;
	int32_t socket_id;
	uint32_t lcore_id;
	int32_t i, nb_rx;
	uint16_t portid;
	uint8_t queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	stats = &core_statistics[lcore_id];
rt4_ctx = socket_ctx[socket_id].rt_ip4;
rt6_ctx = socket_ctx[socket_id].rt_ip6;
sp4_in = socket_ctx[socket_id].sp_ip4_in;
sp6_in = socket_ctx[socket_id].sp_ip6_in;
sa_in = socket_ctx[socket_id].sa_in;
sp4_out = socket_ctx[socket_id].sp_ip4_out;
sp6_out = socket_ctx[socket_id].sp_ip6_out;
sa_out = socket_ctx[socket_id].sa_out;
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC,
"lcore %u has nothing to do\n",
lcore_id);
return;
}
RTE_LOG(INFO, IPSEC,
"entering main loop on lcore %u\n", lcore_id);
for (i = 0; i < qconf->nb_rx_queue; i++) {
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
ipsec_ip_reassembly_dyn_offset_get();
while (!force_quit) {
cur_tsc = rte_rdtsc();
		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}
for (i = 0; i < qconf->nb_rx_queue; ++i) {
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);
if (nb_rx <= 0)
continue;
core_stats_update_rx(nb_rx);
prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
free_pkts(trf.ipsec.pkts, trf.ipsec.num);
if (is_unprotected_port(portid)) {
inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
trf.ip4.num,
&stats->inbound.spd4);
inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
trf.ip6.num,
&stats->inbound.spd6);
v4 = trf.ip4.pkts;
v4_num = trf.ip4.num;
v6 = trf.ip6.pkts;
v6_num = trf.ip6.num;
} else {
ip4.num = 0;
ip6.num = 0;
outb_inl_pro_spd_process(sp4_out, sa_out,
&trf.ip4, &ip4, &ip6,
true,
&stats->outbound.spd4);
outb_inl_pro_spd_process(sp6_out, sa_out,
&trf.ip6, &ip6, &ip4,
false,
&stats->outbound.spd6);
v4 = ip4.pkts;
v4_num = ip4.num;
v6 = ip6.pkts;
v6_num = ip6.num;
}
#if defined __ARM_NEON
route4_pkts_neon(rt4_ctx, v4, v4_num, 0, false);
route6_pkts_neon(rt6_ctx, v6, v6_num);
#else
route4_pkts(rt4_ctx, v4, v4_num, 0, false);
route6_pkts(rt6_ctx, v6, v6_num);
#endif
}
}
}
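/*
 * Poll mode worker for the single SA case: inbound inline-decrypted packets
 * are forwarded as-is and all outbound packets are protected with the one
 * configured outbound SA.
 */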
void
ipsec_poll_mode_wrkr_inl_pr_ss(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	uint16_t sa_out_portid = 0, sa_out_proto = 0;
	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct rte_ipsec_session *ips = NULL;
	struct lcore_rx_queue *rxql;
	struct ipsec_sa *sa = NULL;
	struct lcore_conf *qconf;
	struct sa_ctx *sa_out;
	uint32_t i, nb_rx, j;
	int32_t socket_id;
	uint32_t lcore_id;
	uint16_t portid;
	uint8_t queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Get the outbound single SA info */
	sa_out = socket_ctx[socket_id].sa_out;
if (sa_out && single_sa_idx < sa_out->nb_sa) {
sa = &sa_out->sa[single_sa_idx];
ips = ipsec_get_primary_session(sa);
sa_out_portid = sa->portid;
if (sa->flags & IP6_TUNNEL)
sa_out_proto = IPPROTO_IPV6;
else
sa_out_proto = IPPROTO_IP;
}
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC,
"lcore %u has nothing to do\n",
lcore_id);
return;
}
RTE_LOG(INFO, IPSEC,
"entering main loop on lcore %u\n", lcore_id);
for (i = 0; i < qconf->nb_rx_queue; i++) {
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
while (!force_quit) {
cur_tsc = rte_rdtsc();
		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}
for (i = 0; i < qconf->nb_rx_queue; ++i) {
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);
if (nb_rx <= 0)
continue;
core_stats_update_rx(nb_rx);
if (is_unprotected_port(portid)) {
for (j = 0; j < nb_rx; j++) {
uint32_t ptype, proto;
					pkt = pkts[j];
					ptype = pkt->packet_type &
						RTE_PTYPE_L3_MASK;
					if (ptype == RTE_PTYPE_L3_IPV4)
						proto = IPPROTO_IP;
					else
						proto = IPPROTO_IPV6;
send_single_packet(pkt, portid, proto);
}
continue;
}
			/* Drop packets when there is no outbound inline session */
			if (unlikely(ips == NULL)) {
				free_pkts(pkts, nb_rx);
				continue;
			}

			/* Prepare packets for outbound using the single SA */
			rte_ipsec_pkt_process(ips, pkts, nb_rx);
for (j = 0; j < nb_rx; j++) {
pkt = pkts[j];
send_single_packet(pkt, sa_out_portid,
sa_out_proto);
}
}
}
}
static void
ipsec_poll_mode_wrkr_launch(void)
{
static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
[INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
};
ipsec_worker_fn_t fn;
if (!app_sa_prm.enable) {
fn = ipsec_poll_mode_worker;
} else {
fn = poll_mode_wrkrs[wrkr_flags];
if (!fn)
fn = ipsec_poll_mode_worker;
}
(*fn)();
}
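/*
 * Per-lcore entry point: launch either the poll mode or the event mode worker
 * depending on the packet transfer mode selected in the eventmode config.
 */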
int ipsec_launch_one_lcore(void *args)
{
struct eh_conf *conf;
conf = (struct eh_conf *)args;
if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
ipsec_poll_mode_wrkr_launch();
} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
ipsec_eventmode_worker(conf);
}
return 0;
}