/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver that calls page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
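
/* Usage sketch (illustrative, not part of this file): a driver creating a
 * page_pool for its RX ring.  MYDRV_RX_RING_SIZE and rxq are hypothetical
 * names; only the page_pool_params fields, page_pool_create() and the
 * IS_ERR() check reflect the API above.
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.pool_size	= MYDRV_RX_RING_SIZE,
 *		.nid		= dev_to_node(dev),
 *		.dev		= dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pp = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 *	rxq->page_pool = pp;
 */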

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);

	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
	 * This mapping is kept for lifetime of page, until leaving pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* Page just alloc'ed should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, but provide
 * synchronization guarantee for allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
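
/* Usage sketch (illustrative, not part of this file): refilling an RX ring
 * from NAPI/softirq context.  mydrv_rx_refill(), struct mydrv_rxq and
 * mydrv_post_rx_buffer() are hypothetical driver names; only
 * page_pool_alloc_pages() and the gfp flags reflect the API above.  With
 * PP_FLAG_DMA_MAP the DMA address is available in page->dma_addr, as set up
 * in __page_pool_alloc_pages_slow().
 *
 *	static bool mydrv_rx_refill(struct mydrv_rxq *rxq)
 *	{
 *		struct page *page;
 *
 *		page = page_pool_alloc_pages(rxq->page_pool,
 *					     GFP_ATOMIC | __GFP_NOWARN);
 *		if (unlikely(!page))
 *			return false;
 *
 *		mydrv_post_rx_buffer(rxq, page, page->dma_addr);
 *		return true;
 *	}
 */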

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
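
/* Worked example: the subtraction stays correct across u32 wrap-around.
 * With hold_cnt = 5 and release_cnt = 0xfffffffe (hold_cnt has wrapped),
 * _distance(5, 0xfffffffe) = (s32)(5 - 0xfffffffe) = 7 pages in-flight.
 */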

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 distance;

	distance = _distance(hold_cnt, release_cnt);

	trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
	return distance;
}

static bool __page_pool_safe_to_destroy(struct page_pool *pool)
{
	s32 inflight = page_pool_inflight(pool);

	/* The distance should not be able to become negative */
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return (inflight == 0);
}

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	dma_addr_t dma;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_unmap;

	dma = page->dma_addr;
	/* DMA unmap */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);

skip_dma_unmap:
	atomic_inc(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page,
			      atomic_read(&pool->pages_state_release_cnt));
}

/* unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
	/* When page is unmapped, this implies page will not be
	 * returned to page_pool.
	 */
	__page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);
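
/* Usage sketch (illustrative): a driver hands the page up the network stack
 * instead of recycling it, e.g. when building an skb for an XDP_PASS frame.
 * The page must then be unmapped and forgotten by the pool; the skb path
 * performs the final put_page().  rxq is a hypothetical name.
 *
 *	page_pool_unmap_page(rxq->page_pool, page);
 *	skb = build_skb(page_address(page), PAGE_SIZE);
 */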

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
					  struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1)) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In-case page_pool maintains the DMA mapping, API user must
	 * call page_pool_put_page once.  In this elevated refcnt
	 * case, the DMA is unmapped/released, as driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);
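
/* Usage sketch (illustrative): returning a page after an XDP_DROP verdict,
 * while still in NAPI/softirq context and with refcnt == 1.  With
 * allow_direct=true the page is recycled straight into the alloc cache;
 * from other contexts pass false, and the page goes via the ptr_ring (or
 * back to the page allocator if the ring is full).  rxq is a hypothetical
 * driver name.
 *
 *	case XDP_DROP:
 *		__page_pool_put_page(rxq->page_pool, page, true);
 *		break;
 */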

static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}

static void __warn_in_flight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 distance;

	distance = _distance(hold_cnt, release_cnt);

	/* Drivers should fix this, but only problematic when DMA is used */
	WARN(1, "Still in-flight pages:%d hold:%u released:%u",
	     distance, hold_cnt, release_cnt);
}

void __page_pool_free(struct page_pool *pool)
{
	/* Only last user actually free/release resources */
	if (!page_pool_put(pool))
		return;

	WARN(pool->alloc.count, "API usage violation");
	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");

	/* Can happen due to forced shutdown */
	if (!__page_pool_safe_to_destroy(pool))
		__warn_in_flight(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}
EXPORT_SYMBOL(__page_pool_free);

/* Request to shutdown: release pages cached by page_pool, and check
 * for in-flight pages
 */
bool __page_pool_request_shutdown(struct page_pool *pool)
{
	struct page *page;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);

	return __page_pool_safe_to_destroy(pool);
}
EXPORT_SYMBOL(__page_pool_request_shutdown);
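
/* Teardown sketch (illustrative, not the in-kernel call chain): the pool
 * owner stops all allocations first, then keeps retrying the shutdown
 * request until no pages remain in-flight, and only then frees the pool.
 *
 *	while (!__page_pool_request_shutdown(pool))
 *		msleep(10);
 *	__page_pool_free(pool);
 */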