/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call. Drivers will likely want to use
 * page_pool_dev_alloc_pages(), which replaces dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate a page_pool object. Thus, API
 * users must call page_pool_release_page() when a page is "leaving"
 * the page_pool, or call page_pool_put_page() where appropriate, in
 * order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt,
 * release the DMA mapping and in-flight state accounting. We hope to
 * lift this requirement in the future.
 */
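
/* Usage sketch (illustrative only; rxq is a hypothetical per-RX-queue
 * driver structure holding a page_pool pointer):
 *
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_pages(rxq->page_pool);
 *	if (!page)
 *		return -ENOMEM;
 *
 * and once the page "leaves" the pool, e.g. when handed up the stack:
 *
 *	page_pool_release_page(rxq->page_pool, page);
 */
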
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver
					* gets from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is
					* still the device driver's
					* responsibility.
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection. If
 * the cache is already full (or partly full) then the XDP_DROP
 * recycles would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};
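
/* Setup sketch (illustrative; the ring_size and pdev names are
 * assumptions made for the example):
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */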

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

	/*
	 * Data structure for allocation side
	 *
	 * A driver's allocation side usually already performs some kind
	 * of resource protection. Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule. A NAPI schedule
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache. This
	 * refcnt serves to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
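
/* The non-dev variant lets the caller pick the gfp mask, e.g. when
 * pre-filling a ring from process context (illustrative sketch):
 *
 *	page = page_pool_alloc_pages(pool, GFP_KERNEL);
 */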

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
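
/* Frag API sketch (illustrative; rx_buf_len is a hypothetical driver
 * value): carve a sub-page chunk out of a pooled page. The pool must
 * have been created with PP_FLAG_PAGE_FRAG for this to succeed.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(pool, &offset, rx_buf_len);
 *	if (!page)
 *		return -ENOMEM;
 *	va = page_address(page) + offset;
 */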

/* Get the stored DMA direction. A driver might decide to store this
 * locally and so avoid the extra cache line from page_pool when
 * determining the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
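
/* E.g. a driver can cache the direction once at setup time instead of
 * re-reading pool->p in the hot path (sketch; rxq is hypothetical):
 *
 *	rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 */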

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}

static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above, but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}

/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

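/* Putting the three variants together (illustrative; len is the number
 * of bytes the device may have written):
 *
 *	page_pool_recycle_direct(pool, page);	     NAPI context only
 *	page_pool_put_full_page(pool, page, false);  any context, sync max_len
 *	page_pool_put_page(pool, page, len, false);  sync only len bytes
 */
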
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		/* Two 16-bit shifts, as a single shift by 32 would be
		 * undefined (and warn) when dma_addr_t is 32-bit.
		 */
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}
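
/* Sketch: the DMA-sync-for-CPU before reading a received buffer remains
 * the driver's job (pkt_len is an assumption made for the example):
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(pool->p.dev, dma + pool->p.offset,
 *				pkt_len, page_pool_get_dma_dir(pool));
 */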

static inline void page_pool_set_frag_count(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_atomic_sub_frag_count_return(struct page *page,
							  long nr)
{
	long ret;

	/* As suggested by Alexander, atomic_long_read() may cover up the
	 * reference count errors, so avoid calling atomic_long_read() in
	 * the cases of freeing or draining the page_frags, where we would
	 * not expect it to match or that are slowpath anyway.
	 */
	if (__builtin_constant_p(nr) &&
	    atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}
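
/* Sketch of the intended pairing (illustrative): the allocation side
 * arms the count for nr_frags users, each user later drops one
 * reference, and when the count reaches zero the page may be recycled.
 *
 *	page_pool_set_frag_count(page, nr_frags);
 *	...
 *	if (page_pool_atomic_sub_frag_count_return(page, 1) == 0)
 *		the last user is gone, recycle the page
 */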

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
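
/* Sketch: a driver would typically call this from its NAPI poll loop,
 * so the pool follows the node the queue is actually processed on:
 *
 *	page_pool_nid_changed(rxq->page_pool, numa_mem_id());
 */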

static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	/* The producer side of the ptr_ring can be reached from both
	 * softirq and process context; only take the BH-disabling
	 * variant when not already in softirq.
	 */
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */