DPDK 22.11.5
examples/pipeline/thread.c
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2020 Intel Corporation
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_table_acl.h>
#include <rte_table_hash.h>
#include <rte_table_lpm.h>
#include "obj.h"
#include "thread.h"
#ifndef THREAD_PIPELINES_MAX
#define THREAD_PIPELINES_MAX 256
#endif
#ifndef THREAD_MSGQ_SIZE
#define THREAD_MSGQ_SIZE 64
#endif
#ifndef THREAD_TIMER_PERIOD_MS
#define THREAD_TIMER_PERIOD_MS 100
#endif
/* Pipeline instruction quanta: Needs to be big enough to do some meaningful
* work, but not too big to avoid starving any other pipelines mapped to the
* same thread. For a pipeline that executes 10 instructions per packet, a
* quanta of 1000 instructions equates to processing 100 packets.
*/
#ifndef PIPELINE_INSTR_QUANTA
#define PIPELINE_INSTR_QUANTA 1000
#endif
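/* Control thread: message queues and enable flag for each data plane thread
 * (one entry per lcore).
 */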
struct thread {
struct rte_ring *msgq_req;
struct rte_ring *msgq_rsp;
uint32_t enabled;
};
static struct thread thread[RTE_MAX_LCORE];
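/* Data plane threads: run-time context for each pipeline and for the thread
 * itself (pipeline list, message queues, timer state).
 */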
struct pipeline_data {
struct rte_swx_pipeline *p;
uint64_t timer_period; /* Measured in CPU cycles. */
uint64_t time_next;
};
struct thread_data {
struct rte_swx_pipeline *p[THREAD_PIPELINES_MAX];
uint32_t n_pipelines;
struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
struct rte_ring *msgq_req;
struct rte_ring *msgq_rsp;
uint64_t timer_period; /* Measured in CPU cycles. */
uint64_t time_next;
uint64_t time_next_min;
} __rte_cache_aligned;
static struct thread_data thread_data[RTE_MAX_LCORE];
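/* Control thread: creation and destruction of the per-worker message queues
 * and data plane thread records.
 */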
static void
thread_free(void)
{
uint32_t i;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct thread *t = &thread[i];
if (!rte_lcore_is_enabled(i))
continue;
/* MSGQs */
rte_ring_free(t->msgq_req);
rte_ring_free(t->msgq_rsp);
}
}
int
thread_init(void)
{
uint32_t i;
RTE_LCORE_FOREACH_WORKER(i) {
char name[NAME_MAX];
struct rte_ring *msgq_req, *msgq_rsp;
struct thread *t = &thread[i];
struct thread_data *t_data = &thread_data[i];
uint32_t cpu_id = rte_lcore_to_socket_id(i);
/* MSGQs */
snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
msgq_req = rte_ring_create(name,
THREAD_MSGQ_SIZE,
cpu_id,
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (msgq_req == NULL) {
thread_free();
return -1;
}
snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
msgq_rsp = rte_ring_create(name,
THREAD_MSGQ_SIZE,
cpu_id,
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (msgq_rsp == NULL) {
thread_free();
return -1;
}
/* Control thread records */
t->msgq_req = msgq_req;
t->msgq_rsp = msgq_rsp;
t->enabled = 1;
/* Data plane thread records */
t_data->n_pipelines = 0;
t_data->msgq_req = msgq_req;
t_data->msgq_rsp = msgq_rsp;
t_data->timer_period =
(rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
t_data->time_next_min = t_data->time_next;
}
return 0;
}
static inline int
thread_is_running(uint32_t thread_id)
{
enum rte_lcore_state_t thread_state;
thread_state = rte_eal_get_lcore_state(thread_id);
return (thread_state == RUNNING) ? 1 : 0;
}
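/* Control thread & data plane threads: message passing. */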
enum thread_req_type {
THREAD_REQ_PIPELINE_ENABLE = 0,
THREAD_REQ_PIPELINE_DISABLE,
THREAD_REQ_MAX
};
struct thread_msg_req {
enum thread_req_type type;
union {
struct {
struct rte_swx_pipeline *p;
uint32_t timer_period_ms;
} pipeline_enable;
struct {
struct rte_swx_pipeline *p;
} pipeline_disable;
};
};
struct thread_msg_rsp {
int status;
};
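/* A single heap buffer serves as both the request and its response: the data
 * plane handlers below cast the request into a response and fill it in place,
 * so the allocation must cover the larger of the two structures.
 */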
static struct thread_msg_req *
thread_msg_alloc(void)
{
size_t size = RTE_MAX(sizeof(struct thread_msg_req),
sizeof(struct thread_msg_rsp));
return calloc(1, size);
}
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
free(rsp);
}
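/* Send a request to a data plane thread and busy-wait for the response. Only
 * the control thread calls this, which matches the single-producer /
 * single-consumer configuration of the message rings.
 */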
static struct thread_msg_rsp *
thread_msg_send_recv(uint32_t thread_id,
struct thread_msg_req *req)
{
struct thread *t = &thread[thread_id];
struct rte_ring *msgq_req = t->msgq_req;
struct rte_ring *msgq_rsp = t->msgq_rsp;
struct thread_msg_rsp *rsp;
int status;
/* send */
do {
status = rte_ring_sp_enqueue(msgq_req, req);
} while (status == -ENOBUFS);
/* recv */
do {
status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
} while (status != 0);
return rsp;
}
static int
thread_is_pipeline_enabled(uint32_t thread_id, struct rte_swx_pipeline *p)
{
struct thread *t = &thread[thread_id];
struct thread_data *td = &thread_data[thread_id];
uint32_t i;
if (!t->enabled)
return 0; /* Pipeline NOT enabled on this thread. */
for (i = 0; i < td->n_pipelines; i++)
if (td->p[i] == p)
return 1; /* Pipeline enabled on this thread. */
return 0; /* Pipeline NOT enabled on this thread. */
}
int
thread_pipeline_enable(uint32_t thread_id, struct rte_swx_pipeline *p, uint32_t timer_period_ms)
{
struct thread *t;
struct thread_msg_req *req;
struct thread_msg_rsp *rsp;
int status;
/* Check input params */
if ((thread_id >= RTE_MAX_LCORE) || !p || !timer_period_ms)
return -1;
t = &thread[thread_id];
if (t->enabled == 0)
return -1;
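/* If the data plane thread is not running yet, its context can be updated
 * directly; otherwise the update must be requested through the message queue,
 * as done below.
 */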
if (!thread_is_running(thread_id)) {
struct thread_data *td = &thread_data[thread_id];
struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
if (td->n_pipelines >= THREAD_PIPELINES_MAX)
return -1;
/* Data plane thread */
td->p[td->n_pipelines] = p;
tdp->p = p;
tdp->timer_period = (rte_get_tsc_hz() * timer_period_ms) / 1000;
tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
td->n_pipelines++;
return 0;
}
/* Allocate request */
req = thread_msg_alloc();
if (req == NULL)
return -1;
/* Write request */
req->type = THREAD_REQ_PIPELINE_ENABLE;
req->pipeline_enable.p = p;
req->pipeline_enable.timer_period_ms = timer_period_ms;
/* Send request and wait for response */
rsp = thread_msg_send_recv(thread_id, req);
/* Read response */
status = rsp->status;
/* Free response */
thread_msg_free(rsp);
/* Request completion */
if (status)
return status;
return 0;
}
int
thread_pipeline_disable(uint32_t thread_id, struct rte_swx_pipeline *p)
{
struct thread *t;
struct thread_msg_req *req;
struct thread_msg_rsp *rsp;
int status;
/* Check input params */
if ((thread_id >= RTE_MAX_LCORE) || !p)
return -1;
t = &thread[thread_id];
if (t->enabled == 0)
return -1;
if (!thread_is_pipeline_enabled(thread_id, p))
return 0;
if (!thread_is_running(thread_id)) {
struct thread_data *td = &thread_data[thread_id];
uint32_t i;
for (i = 0; i < td->n_pipelines; i++) {
struct pipeline_data *tdp = &td->pipeline_data[i];
if (tdp->p != p)
continue;
/* Data plane thread */
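/* Remove the pipeline by moving the last entry into its slot; the order of
 * the remaining pipelines on this thread is not preserved.
 */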
if (i < td->n_pipelines - 1) {
struct rte_swx_pipeline *pipeline_last =
td->p[td->n_pipelines - 1];
struct pipeline_data *tdp_last =
&td->pipeline_data[td->n_pipelines - 1];
td->p[i] = pipeline_last;
memcpy(tdp, tdp_last, sizeof(*tdp));
}
td->n_pipelines--;
break;
}
return 0;
}
/* Allocate request */
req = thread_msg_alloc();
if (req == NULL)
return -1;
/* Write request */
req->type = THREAD_REQ_PIPELINE_DISABLE;
req->pipeline_disable.p = p;
/* Send request and wait for response */
rsp = thread_msg_send_recv(thread_id, req);
/* Read response */
status = rsp->status;
/* Free response */
thread_msg_free(rsp);
/* Request completion */
if (status)
return status;
return 0;
}
static inline struct thread_msg_req *
thread_msg_recv(struct rte_ring *msgq_req)
{
struct thread_msg_req *req;
int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
if (status != 0)
return NULL;
return req;
}
static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
struct thread_msg_rsp *rsp)
{
int status;
do {
status = rte_ring_sp_enqueue(msgq_rsp, rsp);
} while (status == -ENOBUFS);
}
static struct thread_msg_rsp *
thread_msg_handle_pipeline_enable(struct thread_data *t,
struct thread_msg_req *req)
{
struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Request */
if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
rsp->status = -1;
return rsp;
}
t->p[t->n_pipelines] = req->pipeline_enable.p;
p->p = req->pipeline_enable.p;
p->timer_period = (rte_get_tsc_hz() *
req->pipeline_enable.timer_period_ms) / 1000;
p->time_next = rte_get_tsc_cycles() + p->timer_period;
t->n_pipelines++;
/* Response */
rsp->status = 0;
return rsp;
}
static struct thread_msg_rsp *
thread_msg_handle_pipeline_disable(struct thread_data *t,
struct thread_msg_req *req)
{
struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
uint32_t n_pipelines = t->n_pipelines;
struct rte_swx_pipeline *pipeline = req->pipeline_disable.p;
uint32_t i;
/* find pipeline */
for (i = 0; i < n_pipelines; i++) {
struct pipeline_data *p = &t->pipeline_data[i];
if (p->p != pipeline)
continue;
if (i < n_pipelines - 1) {
struct rte_swx_pipeline *pipeline_last =
t->p[n_pipelines - 1];
struct pipeline_data *p_last =
&t->pipeline_data[n_pipelines - 1];
t->p[i] = pipeline_last;
memcpy(p, p_last, sizeof(*p));
}
t->n_pipelines--;
rsp->status = 0;
return rsp;
}
/* should not get here */
rsp->status = 0;
return rsp;
}
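/* Data plane thread: drain the request queue and serve each pending control
 * request before resuming packet processing.
 */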
static void
thread_msg_handle(struct thread_data *t)
{
for ( ; ; ) {
struct thread_msg_req *req;
struct thread_msg_rsp *rsp;
req = thread_msg_recv(t->msgq_req);
if (req == NULL)
break;
switch (req->type) {
case THREAD_REQ_PIPELINE_ENABLE:
rsp = thread_msg_handle_pipeline_enable(t, req);
break;
case THREAD_REQ_PIPELINE_DISABLE:
rsp = thread_msg_handle_pipeline_disable(t, req);
break;
default:
rsp = (struct thread_msg_rsp *) req;
rsp->status = -1;
}
thread_msg_send(t->msgq_rsp, rsp);
}
}
int
thread_main(void *arg __rte_unused)
{
struct thread_data *t;
uint32_t thread_id, i;
thread_id = rte_lcore_id();
t = &thread_data[thread_id];
/* Dispatch loop */
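/* Each iteration runs every enabled pipeline for one instruction quanta.
 * Control plane work (message handling) is only considered every 16th
 * iteration, i.e. when (i & 0xF) == 0, and only once the earliest pending
 * timer (time_next_min) has expired.
 */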
for (i = 0; ; i++) {
uint32_t j;
/* Data Plane */
for (j = 0; j < t->n_pipelines; j++)
rte_swx_pipeline_run(t->p[j], PIPELINE_INSTR_QUANTA);
/* Control Plane */
if ((i & 0xF) == 0) {
uint64_t time = rte_get_tsc_cycles();
uint64_t time_next_min = UINT64_MAX;
if (time < t->time_next_min)
continue;
/* Thread message queues */
{
uint64_t time_next = t->time_next;
if (time_next <= time) {
thread_msg_handle(t);
time_next = time + t->timer_period;
t->time_next = time_next;
}
if (time_next < time_next_min)
time_next_min = time_next;
}
t->time_next_min = time_next_min;
}
}
return 0;
}
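/* Illustrative usage sketch (not part of the original file). Assumes EAL is
 * already initialized, "p" points to a fully built struct rte_swx_pipeline,
 * and "worker_lcore_id" is a valid worker lcore; the 10 ms timer period is an
 * arbitrary example value.
 *
 *     thread_init();
 *     rte_eal_mp_remote_launch(thread_main, NULL, SKIP_MAIN);
 *     thread_pipeline_enable(worker_lcore_id, p, 10);
 */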