#ifndef __INCLUDE_RTE_SCHED_H__
#define __INCLUDE_RTE_SCHED_H__

#include <rte_compat.h>

#define RTE_SCHED_QUEUES_PER_PIPE 16

#define RTE_SCHED_BE_QUEUES_PER_PIPE 4

#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE \
	(RTE_SCHED_QUEUES_PER_PIPE - RTE_SCHED_BE_QUEUES_PER_PIPE + 1)

#define RTE_SCHED_TRAFFIC_CLASS_BE (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)

#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
#endif
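Taken together, these macros fix the per-pipe layout: 16 queues, the last 4 of which belong to a single best-effort traffic class, giving 13 traffic classes with index 12 (RTE_SCHED_TRAFFIC_CLASS_BE) reserved for best effort, plus a default framing overhead of 24 bytes per packet used for rate accounting. A minimal sketch that just prints the derived values, assuming a DPDK build environment so that rte_sched.h is on the include path:

#include <stdio.h>
#include <rte_sched.h>

int main(void)
{
	/* 16 queues per pipe, 4 of them reserved for the best-effort TC */
	printf("queues per pipe:       %d\n", RTE_SCHED_QUEUES_PER_PIPE);
	printf("BE queues per pipe:    %d\n", RTE_SCHED_BE_QUEUES_PER_PIPE);
	/* 16 - 4 + 1 = 13 traffic classes; the last one (index 12) is BE */
	printf("traffic classes:       %d\n", RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
	printf("best-effort TC index:  %d\n", RTE_SCHED_TRAFFIC_CLASS_BE);
	printf("frame overhead (B):    %d\n", RTE_SCHED_FRAME_OVERHEAD_DEFAULT);
	return 0;
}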
struct rte_sched_pipe_params {
	/* ... */
	uint8_t tc_ov_weight;
	/* ... */
};

struct rte_sched_cman_params {
	/* ... */
};

struct rte_sched_subport_params {
	uint32_t n_pipes_per_subport_enabled;
	/* ... */
	struct rte_sched_pipe_params *pipe_profiles;
	uint32_t n_pipe_profiles;
	uint32_t n_max_pipe_profiles;
	struct rte_sched_cman_params *cman_params;
};

struct rte_sched_subport_profile_params {
	/* ... */
};
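Only a handful of the parameter fields survive in the excerpt above. Purely as an illustration of how they fit together, a subport might be described along the following lines; the elided members (token-bucket rates, sizes, periods, queue sizes, WRR weights) would have to be filled in as well, and the numeric values here are placeholders, not recommendations:

#include <stddef.h>
#include <rte_sched.h>

/* One pipe profile; only the field preserved in the excerpt above is set
 * explicitly -- the remaining (elided) fields must be filled in for real use. */
static struct rte_sched_pipe_params pipe_profiles[] = {
	[0] = {
		.tc_ov_weight = 1,	/* weight for best-effort TC oversubscription */
		/* ... rates, periods, WRR weights ... */
	},
};

static struct rte_sched_subport_params subport_params = {
	.n_pipes_per_subport_enabled = 4096,	/* pipes actually allocated */
	.pipe_profiles = pipe_profiles,		/* pipe profile table */
	.n_pipe_profiles = 1,			/* entries currently used */
	.n_max_pipe_profiles = 1,		/* table capacity */
	.cman_params = NULL,			/* NULL: congestion management off */
	/* ... queue sizes and other elided fields ... */
};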
struct rte_sched_port *rte_sched_port_config(struct rte_sched_port_params *params);

int rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
	uint32_t subport_id, struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id);

__rte_experimental
int rte_sched_port_subport_profile_add(struct rte_sched_port *port,
	struct rte_sched_subport_profile_params *profile,
	uint32_t *subport_profile_id);

int rte_sched_subport_config(struct rte_sched_port *port, uint32_t subport_id,
	struct rte_sched_subport_params *params, uint32_t subport_profile_id);

int rte_sched_pipe_config(struct rte_sched_port *port, uint32_t subport_id,
	uint32_t pipe_id, int32_t pipe_profile);

uint32_t rte_sched_port_get_memory_footprint(
	struct rte_sched_port_params *port_params,
	struct rte_sched_subport_params **subport_params);

void rte_sched_port_pkt_write(struct rte_sched_port *port, struct rte_mbuf *pkt,
	uint32_t subport, uint32_t pipe, uint32_t traffic_class,
	uint32_t queue, enum rte_color color);

void rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
	const struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe,
	uint32_t *traffic_class, uint32_t *queue);

enum rte_color rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt);
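The declarations above follow the usual configuration order: create the port, register a subport bandwidth profile, configure the subport, register a pipe profile within it, and finally map pipes to that profile. A sketch of that sequence, assuming the parameter structures are filled in elsewhere (error handling abbreviated):

#include <rte_sched.h>

static struct rte_sched_port *
setup_hierarchy(struct rte_sched_port_params *port_params,
	struct rte_sched_subport_profile_params *subport_profile,
	struct rte_sched_subport_params *subport_params,
	struct rte_sched_pipe_params *pipe_params, uint32_t n_pipes)
{
	struct rte_sched_port *port;
	uint32_t subport_profile_id, pipe_profile_id, pipe_id;

	/* 1. Create the port-level scheduler object. */
	port = rte_sched_port_config(port_params);
	if (port == NULL)
		return NULL;

	/* 2. Register a subport bandwidth profile (experimental API). */
	if (rte_sched_port_subport_profile_add(port, subport_profile,
			&subport_profile_id) != 0)
		goto err;

	/* 3. Configure subport 0 with that profile. */
	if (rte_sched_subport_config(port, 0, subport_params,
			subport_profile_id) != 0)
		goto err;

	/* 4. Register a pipe profile inside subport 0. */
	if (rte_sched_subport_pipe_profile_add(port, 0, pipe_params,
			&pipe_profile_id) != 0)
		goto err;

	/* 5. Point every enabled pipe of subport 0 at that profile. */
	for (pipe_id = 0; pipe_id < n_pipes; pipe_id++)
		if (rte_sched_pipe_config(port, 0, pipe_id,
				(int32_t)pipe_profile_id) != 0)
			goto err;

	return port;

err:
	rte_sched_port_free(port);
	return NULL;
}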
int rte_sched_pipe_config(struct rte_sched_port *port, uint32_t subport_id, uint32_t pipe_id, int32_t pipe_profile)
int rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
void rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port, const struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
struct rte_sched_port * rte_sched_port_config(struct rte_sched_port_params *params)
int rte_sched_subport_pipe_profile_add(struct rte_sched_port *port, uint32_t subport_id, struct rte_sched_pipe_params *params, uint32_t *pipe_profile_id)
void rte_sched_port_pkt_write(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_color color)
uint32_t rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params, struct rte_sched_subport_params **subport_params)
#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE (RTE_SCHED_QUEUES_PER_PIPE - RTE_SCHED_BE_QUEUES_PER_PIPE + 1)
int rte_sched_subport_read_stats(struct rte_sched_port *port, uint32_t subport_id, struct rte_sched_subport_stats *stats, uint32_t *tc_ov)
__rte_experimental int rte_sched_port_subport_profile_add(struct rte_sched_port *port, struct rte_sched_subport_profile_params *profile, uint32_t *subport_profile_id)
int rte_sched_subport_config(struct rte_sched_port *port, uint32_t subport_id, struct rte_sched_subport_params *params, uint32_t subport_profile_id)
void rte_sched_port_free(struct rte_sched_port *port)
int rte_sched_queue_read_stats(struct rte_sched_port *port, uint32_t queue_id, struct rte_sched_queue_stats *stats, uint16_t *qlen)
#define RTE_SCHED_BE_QUEUES_PER_PIPE 4
__rte_experimental int rte_sched_subport_tc_ov_config(struct rte_sched_port *port, uint32_t subport_id, bool tc_ov_enable)
int rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
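On the data path the application first stamps each mbuf with its scheduler tree path (subport, pipe, traffic class, queue) and color, then hands bursts to the enqueue side; the dequeue side returns packets in scheduling order. A minimal sketch of that hot path; the fixed tree path used here stands in for a real classification stage:

#include <rte_mbuf.h>
#include <rte_meter.h>		/* enum rte_color */
#include <rte_sched.h>

#define BURST_SIZE 32

static void
sched_burst(struct rte_sched_port *port, struct rte_mbuf **rx_pkts,
	uint32_t n_rx)
{
	struct rte_mbuf *tx_pkts[BURST_SIZE];
	uint32_t i;
	int n_deq, j;

	/* Stamp the tree path and color into each mbuf. Every packet goes to
	 * subport 0, pipe 0, best-effort TC, queue 0 in this sketch. */
	for (i = 0; i < n_rx; i++)
		rte_sched_port_pkt_write(port, rx_pkts[i], 0, 0,
			RTE_SCHED_TRAFFIC_CLASS_BE, 0, RTE_COLOR_GREEN);

	/* Enqueue the burst; packets the scheduler cannot accept are dropped
	 * internally and show up in the drop counters. */
	rte_sched_port_enqueue(port, rx_pkts, n_rx);

	/* Drain up to BURST_SIZE packets in scheduling order. */
	n_deq = rte_sched_port_dequeue(port, tx_pkts, BURST_SIZE);
	for (j = 0; j < n_deq; j++) {
		uint32_t subport, pipe, tc, queue;
		enum rte_color color;

		rte_sched_port_pkt_read_tree_path(port, tx_pkts[j],
			&subport, &pipe, &tc, &queue);
		color = rte_sched_port_pkt_read_color(tx_pkts[j]);

		/* ... transmit tx_pkts[j]; tree path and color available here ... */
		(void)subport; (void)pipe; (void)tc; (void)queue; (void)color;
	}
}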
uint32_t n_subport_profiles
struct rte_sched_subport_profile_params * subport_profiles
uint32_t n_subports_per_port
uint32_t n_pipes_per_subport
uint32_t n_max_subport_profiles
uint64_t n_pkts_cman_dropped
uint64_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_cman_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
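These counters are filled in by rte_sched_subport_read_stats() and rte_sched_queue_read_stats(), which also clear what they return. A short sketch polling subport 0 and queue 0 (indices chosen arbitrarily); it assumes the scalar n_pkts_cman_dropped listed above belongs to the queue statistics structure and sizes the tc_ov output as one entry per traffic class, so check those assumptions against the full header before reuse:

#include <inttypes.h>
#include <stdio.h>
#include <rte_sched.h>

static void
dump_stats(struct rte_sched_port *port)
{
	struct rte_sched_subport_stats subport_stats;
	struct rte_sched_queue_stats queue_stats;
	/* Oversubscription status output; assumed here to be one entry per
	 * traffic class -- see the header documentation for the exact size. */
	uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	uint16_t qlen;
	unsigned int tc;

	/* Read (and clear) the per-traffic-class counters of subport 0. */
	if (rte_sched_subport_read_stats(port, 0, &subport_stats, tc_ov) == 0)
		for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++)
			printf("TC %u: %" PRIu64 " pkts, %" PRIu64 " dropped\n",
				tc, subport_stats.n_pkts_tc[tc],
				subport_stats.n_pkts_tc_dropped[tc]);

	/* Read (and clear) the counters of queue 0, plus its current length. */
	if (rte_sched_queue_read_stats(port, 0, &queue_stats, &qlen) == 0)
		printf("queue 0: qlen=%u, cman drops=%" PRIu64 "\n",
			(unsigned int)qlen, queue_stats.n_pkts_cman_dropped);
}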