]> git.proxmox.com Git - ceph.git/blame - ceph/src/seastar/dpdk/drivers/event/sw/iq_ring.h
update download target update for octopus release
[ceph.git] / ceph / src / seastar / dpdk / drivers / event / sw / iq_ring.h
CommitLineData
11fdf7f2
TL
1/*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Ring structure definitions used for the internal ring buffers of the
35 * SW eventdev implementation. These are designed for single-core use only.
36 */
37#ifndef _IQ_RING_
38#define _IQ_RING_
39
40#include <stdint.h>
41
42#include <rte_common.h>
43#include <rte_memory.h>
44#include <rte_malloc.h>
45#include <rte_eventdev.h>
46
/* Maximum length (including the NUL terminator) of an iq_ring name. */
#define IQ_RING_NAMESIZE 12
/* Ring depth; must be a power of two so index masking works. */
#define QID_IQ_DEPTH 512
/*
 * Index mask. Fully parenthesized so the cast cannot combine with
 * operators at the macro's expansion site.
 */
#define QID_IQ_MASK ((uint16_t)(QID_IQ_DEPTH - 1))
50
/*
 * Single-core FIFO ring of events.  read_idx/write_idx are free-running
 * 16-bit counters that are masked with QID_IQ_MASK on every array access;
 * count = write_idx - read_idx remains correct across wraparound.
 * Usable capacity is QID_IQ_DEPTH - 1 entries (see iq_ring_free_count()).
 */
struct iq_ring {
	char name[IQ_RING_NAMESIZE] __rte_cache_aligned; /* debug name; also cache-aligns the struct */
	uint16_t write_idx; /* next slot to produce into (unmasked) */
	uint16_t read_idx;  /* next slot to consume from (unmasked) */

	struct rte_event ring[QID_IQ_DEPTH]; /* event storage */
};
58
/* Force inlining of the small hot-path helpers below (GCC/Clang attribute). */
#ifndef force_inline
#define force_inline inline __attribute__((always_inline))
#endif
62
63static inline struct iq_ring *
64iq_ring_create(const char *name, unsigned int socket_id)
65{
66 struct iq_ring *retval;
67
68 retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
69 if (retval == NULL)
70 goto end;
71
72 snprintf(retval->name, sizeof(retval->name), "%s", name);
73 retval->write_idx = retval->read_idx = 0;
74end:
75 return retval;
76}
77
/* Free a ring created by iq_ring_create(). Safe to call with NULL. */
static inline void
iq_ring_destroy(struct iq_ring *r)
{
	rte_free(r);
}
83
84static force_inline uint16_t
85iq_ring_count(const struct iq_ring *r)
86{
87 return r->write_idx - r->read_idx;
88}
89
90static force_inline uint16_t
91iq_ring_free_count(const struct iq_ring *r)
92{
93 return QID_IQ_MASK - iq_ring_count(r);
94}
95
96static force_inline uint16_t
97iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
98{
99 const uint16_t read = r->read_idx;
100 uint16_t write = r->write_idx;
101 const uint16_t space = read + QID_IQ_MASK - write;
102 uint16_t i;
103
104 if (space < nb_qes)
105 nb_qes = space;
106
107 for (i = 0; i < nb_qes; i++, write++)
108 r->ring[write & QID_IQ_MASK] = qes[i];
109
110 r->write_idx = write;
111
112 return nb_qes;
113}
114
115static force_inline uint16_t
116iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
117{
118 uint16_t read = r->read_idx;
119 const uint16_t write = r->write_idx;
120 const uint16_t items = write - read;
121 uint16_t i;
122
123 for (i = 0; i < nb_qes; i++, read++)
124 qes[i] = r->ring[read & QID_IQ_MASK];
125
126 if (items < nb_qes)
127 nb_qes = items;
128
129 r->read_idx += nb_qes;
130
131 return nb_qes;
132}
133
134/* assumes there is space, from a previous dequeue_burst */
135static force_inline uint16_t
136iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
137{
138 uint16_t i, read = r->read_idx;
139
140 for (i = nb_qes; i-- > 0; )
141 r->ring[--read & QID_IQ_MASK] = qes[i];
142
143 r->read_idx = read;
144 return nb_qes;
145}
146
/*
 * Return a pointer to the oldest event in the ring without removing it.
 * Caller must ensure the ring is non-empty (see iq_ring_count()); on an
 * empty ring this points at a stale slot.
 */
static force_inline const struct rte_event *
iq_ring_peek(const struct iq_ring *r)
{
	return &r->ring[r->read_idx & QID_IQ_MASK];
}
152
/*
 * Discard the oldest event (typically after iq_ring_peek()).  Caller
 * must ensure the ring is non-empty; popping an empty ring corrupts
 * the count.
 */
static force_inline void
iq_ring_pop(struct iq_ring *r)
{
	r->read_idx++;
}
158
159static force_inline int
160iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
161{
162 const uint16_t read = r->read_idx;
163 const uint16_t write = r->write_idx;
164 const uint16_t space = read + QID_IQ_MASK - write;
165
166 if (space == 0)
167 return -1;
168
169 r->ring[write & QID_IQ_MASK] = *qe;
170
171 r->write_idx = write + 1;
172
173 return 0;
174}
175
176#endif