10#ifndef _RTE_RING_PEEK_ELEM_PVT_H_
11#define _RTE_RING_PEEK_ELEM_PVT_H_
/*
 * NOTE(review): isolated fragment of a get-tail helper (enclosing function
 * truncated in this view).  Clamps the caller's request: if fewer than 'num'
 * slots are pending ('n' < 'num'), report 0 — i.e. all-or-nothing semantics.
 * Presumably 'n' is head minus tail computed on the missing lines above —
 * confirm against the full source.
 */
41 num = (n >= num) ? num : 0;
/*
 * NOTE(review): truncated set-head/tail helper — return type, name, first
 * parameters and most of the body are missing from this view.  What is
 * visible: the tail update is published with a release store, so prior
 * writes to the ring slots become visible before the new tail value.
 * 'enqueue' presumably selects producer vs consumer side — TODO confirm.
 */
54 uint32_t num, uint32_t enqueue)
62 __atomic_store_n(&ht->
tail, pos, __ATOMIC_RELEASE);
/*
 * NOTE(review): fragment — return type and trailing parameters are cut off
 * in this view, and body lines are missing between the visible statements.
 * Visible behavior: loads the combined head/tail word of the HTS
 * (head/tail-sync) structure relaxed, derives the count of elements between
 * head and tail, and clamps the request to all-or-nothing.
 */
75__rte_ring_hts_get_tail(
struct rte_ring_hts_headtail *ht, uint32_t *tail,
79 union __rte_ring_hts_pos p;
/* Single 64-bit load gives a consistent head/tail snapshot; relaxed order
 * suffices here since no data read depends on it at this point. */
81 p.raw = __atomic_load_n(&ht->ht.raw, __ATOMIC_RELAXED);
/* Unsigned wraparound arithmetic: distance from tail to head. */
82 n = p.pos.head - p.pos.tail;
/* All-or-nothing: if fewer than 'num' available, take none. */
85 num = (n >= num) ? num : 0;
/*
 * NOTE(review): fragment — return type and some body lines missing from
 * this view.  Visible behavior: advances both head and tail of the HTS
 * structure to 'tail + num' and publishes them with a single 64-bit
 * release store, so slot contents written earlier are visible to other
 * threads before the updated positions.  The 'enqueue' parameter is unused
 * in the visible lines — presumably consumed on a missing line; confirm.
 */
97__rte_ring_hts_set_head_tail(
struct rte_ring_hts_headtail *ht, uint32_t tail,
98 uint32_t num, uint32_t enqueue)
100 union __rte_ring_hts_pos p;
/* New position: both fields set equal, marking no operation in flight. */
104 p.pos.head = tail + num;
105 p.pos.tail = p.pos.head;
/* One atomic release store updates head and tail together. */
107 __atomic_store_n(&ht->ht.raw, p.raw, __ATOMIC_RELEASE);
/*
 * NOTE(review): fragment — parameters after 'n' and most of the body are
 * missing.  This is the first stage of a two-stage ("start/finish")
 * enqueue: it reserves space by moving the producer head.  Two visible
 * head-move call sites (lines 122 and 125) suggest a branch on the ring's
 * producer sync type (single-threaded vs HTS) on the missing lines —
 * TODO confirm against the full source.
 */
114__rte_ring_do_enqueue_start(
struct rte_ring *r, uint32_t n,
117 uint32_t free, head, next;
122 behavior, &head, &next, &free);
125 n = __rte_ring_hts_move_prod_head(r, n, behavior,
/* Report remaining free space after this reservation, if requested. */
137 if (free_space != NULL)
138 *free_space = free - n;
/*
 * NOTE(review): fragment — parameters after 'obj_table' and several body
 * lines are missing.  First stage of a two-stage dequeue: reserves 'n'
 * elements by moving the consumer head (two visible call sites, lines 156
 * and 159, again suggest a sync-type branch on missing lines), then copies
 * the reserved elements out to 'obj_table'.  The matching "finish" call
 * presumably updates the consumer tail later — confirm.
 */
147__rte_ring_do_dequeue_start(
struct rte_ring *r,
void *obj_table,
151 uint32_t avail, head, next;
156 behavior, &head, &next, &avail);
159 n = __rte_ring_hts_move_cons_head(r, n, behavior,
/* Copy the reserved elements out; tail is not yet advanced, so the
 * slots remain owned by this consumer until the finish stage. */
172 __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
/* Report how many elements remain available after this dequeue. */
174 if (available != NULL)
175 *available = avail - n;
#define __rte_always_inline
enum rte_ring_sync_type sync_type