/*
 * Definitions for the 'struct skb_array' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Limited-size FIFO of skbs. Can be used more or less whenever
 * sk_buff_head can be used, except you need to know the queue size in
 * advance.
 * Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};
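
/*
 * Illustrative usage sketch: a minimal single-queue lifecycle. The ring
 * size of 128 and GFP_KERNEL are arbitrary choices for the example.
 *
 *	struct skb_array a;
 *
 *	if (skb_array_init(&a, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (skb_array_produce(&a, skb))
 *		kfree_skb(skb);		(ring full: -ENOSPC, skb stays with the caller)
 *	skb = skb_array_consume(&a);	(NULL when the ring is empty)
 *	skb_array_cleanup(&a);		(frees any skbs still queued)
 *
 * The _bh/_irq/_any variants below take the underlying ptr_ring lock with
 * the matching spin_lock flavour (_any uses the irqsave form); pick the one
 * that suits the calling context.
 */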

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}
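
/*
 * Illustrative sketch of the busy-wait pattern the comment above
 * __skb_array_full() describes, with cpu_relax() acting as the compiler
 * barrier (assumes the caller is allowed to spin):
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 *	err = skb_array_produce(a, skb);
 *
 * With multiple producers the ring may fill up again before the produce
 * call runs, so its return value still needs to be checked.
 */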

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return !__ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}
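
/*
 * Illustrative sketch for the __skb_array_empty() caveat above: polling a
 * ring that is guaranteed never to be resized, with cpu_relax() as the
 * compiler barrier:
 *
 *	while (__skb_array_empty(a))
 *		cpu_relax();
 *	skb = skb_array_consume(a);
 *
 * If the ring may be resized concurrently, use skb_array_empty() or one of
 * the locked variants below instead.
 */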

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}
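
/*
 * Illustrative sketch of batched consumption (the batch size of 16 and the
 * process_skb() helper are arbitrary for the example): the call fills the
 * caller-supplied array and returns how many skbs were actually dequeued,
 * which may be fewer than requested.
 *
 *	struct sk_buff *skbs[16];
 *	int i, n;
 *
 *	n = skb_array_consume_batched(a, skbs, ARRAY_SIZE(skbs));
 *	for (i = 0; i < n; i++)
 *		process_skb(skbs[i]);
 *
 * The _irq/_bh/_any batched variants below follow the same pattern.
 */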

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

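/* The peek_len helpers report the length of the skb at the head of the ring
 * (including VLAN_HLEN if a VLAN tag is present) without dequeuing it; they
 * return 0 when the ring is empty.
 */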
static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

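/* Destructor callback handed to the ptr_ring helpers below: frees an skb
 * that is dropped from the ring (for example during resize or cleanup).
 */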
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

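/* Return previously consumed skbs to the ring; skbs that can no longer be
 * fitted back are freed via __skb_array_destroy_skb.
 */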
static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

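/* Resize several skb_arrays at once (for example one per device queue).
 * The BUILD_BUG_ON below checks that 'ring' is the first member of
 * struct skb_array, which is what makes the cast from skb_array ** to
 * ptr_ring ** legitimate.
 */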
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */