/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Driver calling page_pool_create() also calls page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
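
/* Usage sketch (not part of this file): a driver would typically create one
 * pool per RX-queue at setup time.  The params fields below are the real
 * struct page_pool_params members; the concrete values and the surrounding
 * driver context are hypothetical assumptions.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,			// hypothetical ring size
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,		// device doing the DMA
 *		.dma_dir	= DMA_FROM_DEVICE,	// DMA_BIDIRECTIONAL for XDP_TX
 *		.max_len	= PAGE_SIZE,		// max area synced for device
 *		.offset		= 0,			// where the HW starts writing
 *	};
 *	struct page_pool *pool;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */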

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees CPU and thus NUMA node are stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance. This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use the 'struct page' area for storing the
	 * DMA address, since dma_addr_t can be either 32 or 64 bits and
	 * does not always fit into page private data (i.e. a 32-bit CPU
	 * with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page just alloc'ed should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, while providing
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
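
/* Usage sketch (hypothetical driver code): refilling an RX-ring from NAPI
 * context.  my_rx_ring, my_ring_has_room() and my_post_rx_buffer() are
 * made-up names; only page_pool_alloc_pages() and the GFP flags are real API.
 *
 *	static int my_rx_refill(struct my_rx_ring *rxr)
 *	{
 *		struct page *page;
 *
 *		while (my_ring_has_room(rxr)) {
 *			page = page_pool_alloc_pages(rxr->page_pool,
 *						     GFP_ATOMIC | __GFP_NOWARN);
 *			if (!page)
 *				return -ENOMEM;
 *			my_post_rx_buffer(rxr, page);
 *		}
 *		return 0;
 *	}
 */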

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))
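
/* Worked example of the wrap-safe arithmetic: with hold_cnt = 2 after a u32
 * wrap-around and release_cnt = 0xfffffffe, the plain unsigned difference is
 * huge, but _distance(2, 0xfffffffe) = (s32)0x00000004 = 4, i.e. 4 pages are
 * still in-flight.  Valid as long as the true distance stays below 2^31.
 */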

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnects a page from a page_pool. API users can have a need to
 * disconnect a page, to allow it to be used as a regular page (that will
 * eventually be returned to the normal page-allocator via put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page->dma_addr;

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);
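
/* Usage sketch (hypothetical driver code): a driver that builds an SKB from
 * a pool page and hands it to the network stack first disconnects the page,
 * so the stack's eventual put_page() frees it via the normal page allocator.
 * my_build_skb() and rxr are made-up names.
 *
 *	skb = my_build_skb(page, len);
 *	if (skb) {
 *		page_pool_release_page(rxr->page_pool, page);
 *		napi_gro_receive(napi, skb);
 *	}
 */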

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}
	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_page);
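
/* Usage sketch (hypothetical driver code): recycling a page on XDP_DROP from
 * the driver's NAPI poll loop.  allow_direct = true is only safe here because
 * this is the softirq/NAPI context that owns the alloc-side cache;
 * dma_sync_size = -1 requests a sync of up to pool->p.max_len.
 *
 *	switch (act) {
 *	case XDP_DROP:
 *		page_pool_put_page(rxr->page_pool, page, -1, true);
 *		break;
 *	}
 */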

/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break; /* ring full */
	}
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
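
/* Usage sketch: the caller batches the kernel virtual addresses of page data
 * (as virt_to_head_page() expects) and flushes them in one go.  The
 * TX-completion loop and my_next_completed_frame() below are hypothetical;
 * frame->data refers to a struct xdp_frame's data pointer.
 *
 *	void *batch[16];
 *	int n = 0;
 *
 *	while (n < ARRAY_SIZE(batch) &&
 *	       (frame = my_next_completed_frame(txr)) != NULL)
 *		batch[n++] = frame->data;
 *	if (n)
 *		page_pool_put_page_bulk(txr->page_pool, batch, n);
 */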

static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
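
/* Usage sketch (hypothetical driver teardown): if the pool was registered as
 * an XDP memory model, unregistering the rxq info typically drops the
 * reference taken via page_pool_use_xdp_mem(); the driver then drops its own
 * reference.  page_pool_destroy() defers the final free until all in-flight
 * pages have been returned.
 *
 *	xdp_rxq_info_unreg(&rxr->xdp_rxq);
 *	page_pool_destroy(rxr->page_pool);
 *	rxr->page_pool = NULL;
 */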

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);
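
/* Usage sketch (hypothetical driver code): called from a safe context such as
 * the NAPI poll routine when the driver notices its IRQ/CPU affinity moved to
 * another NUMA node.  rxr and its last_nid field are made-up names.
 *
 *	// in my_napi_poll()
 *	if (rxr->last_nid != numa_mem_id()) {
 *		rxr->last_nid = numa_mem_id();
 *		page_pool_update_nid(rxr->page_pool, rxr->last_nid);
 *	}
 */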