1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
6 * Generic ring structure for passing events from one core to another.
8 * Used by the software scheduler for the producer and consumer rings for
9 * each port, i.e. for passing events from worker cores to scheduler and
10 * vice-versa. Designed for single-producer, single-consumer use with two
11 * cores working on each ring.
#include <stdint.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
23 #define QE_RING_NAMESIZE 32
26 char name
[QE_RING_NAMESIZE
] __rte_cache_aligned
;
27 uint32_t ring_size
; /* size of memory block allocated to the ring */
28 uint32_t mask
; /* mask for read/write values == ring_size -1 */
29 uint32_t size
; /* actual usable space in the ring */
30 volatile uint32_t write_idx __rte_cache_aligned
;
31 volatile uint32_t read_idx __rte_cache_aligned
;
33 struct rte_event ring
[0] __rte_cache_aligned
;
36 static inline struct qe_ring
*
37 qe_ring_create(const char *name
, unsigned int size
, unsigned int socket_id
)
39 struct qe_ring
*retval
;
40 const uint32_t ring_size
= rte_align32pow2(size
+ 1);
41 size_t memsize
= sizeof(*retval
) +
42 (ring_size
* sizeof(retval
->ring
[0]));
44 retval
= rte_zmalloc_socket(NULL
, memsize
, 0, socket_id
);
48 snprintf(retval
->name
, sizeof(retval
->name
), "EVDEV_RG_%s", name
);
49 retval
->ring_size
= ring_size
;
50 retval
->mask
= ring_size
- 1;
57 qe_ring_destroy(struct qe_ring
*r
)
62 static __rte_always_inline
unsigned int
63 qe_ring_count(const struct qe_ring
*r
)
65 return r
->write_idx
- r
->read_idx
;
68 static __rte_always_inline
unsigned int
69 qe_ring_free_count(const struct qe_ring
*r
)
71 return r
->size
- qe_ring_count(r
);
74 static __rte_always_inline
unsigned int
75 qe_ring_enqueue_burst(struct qe_ring
*r
, const struct rte_event
*qes
,
76 unsigned int nb_qes
, uint16_t *free_count
)
78 const uint32_t size
= r
->size
;
79 const uint32_t mask
= r
->mask
;
80 const uint32_t read
= r
->read_idx
;
81 uint32_t write
= r
->write_idx
;
82 const uint32_t space
= read
+ size
- write
;
88 for (i
= 0; i
< nb_qes
; i
++, write
++)
89 r
->ring
[write
& mask
] = qes
[i
];
96 *free_count
= space
- nb_qes
;
101 static __rte_always_inline
unsigned int
102 qe_ring_enqueue_burst_with_ops(struct qe_ring
*r
, const struct rte_event
*qes
,
103 unsigned int nb_qes
, uint8_t *ops
)
105 const uint32_t size
= r
->size
;
106 const uint32_t mask
= r
->mask
;
107 const uint32_t read
= r
->read_idx
;
108 uint32_t write
= r
->write_idx
;
109 const uint32_t space
= read
+ size
- write
;
115 for (i
= 0; i
< nb_qes
; i
++, write
++) {
116 r
->ring
[write
& mask
] = qes
[i
];
117 r
->ring
[write
& mask
].op
= ops
[i
];
123 r
->write_idx
= write
;
128 static __rte_always_inline
unsigned int
129 qe_ring_dequeue_burst(struct qe_ring
*r
, struct rte_event
*qes
,
132 const uint32_t mask
= r
->mask
;
133 uint32_t read
= r
->read_idx
;
134 const uint32_t write
= r
->write_idx
;
135 const uint32_t items
= write
- read
;
142 for (i
= 0; i
< nb_qes
; i
++, read
++)
143 qes
[i
] = r
->ring
[read
& mask
];
148 r
->read_idx
+= nb_qes
;