]> git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
page_pool: Add API to update numa node
authorSaeed Mahameed <saeedm@mellanox.com>
Wed, 20 Nov 2019 00:15:17 +0000 (00:15 +0000)
committerDavid S. Miller <davem@davemloft.net>
Wed, 20 Nov 2019 19:47:36 +0000 (11:47 -0800)
Add page_pool_update_nid() to be called by page pool consumers when they
detect numa node changes.

It will update the page pool nid value to start allocating from the new
effective numa node.

This is to mitigate the page pool allocating pages from the wrong numa
node (the node where the pool was originally allocated) and holding on
to pages that belong to a different numa node, which causes performance
degradation.

For pages that are already being consumed and could be returned to the
pool by the consumer, the next patch will add a per-page check to avoid
recycling them back to the pool and return them to the page allocator.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/page_pool.h
include/trace/events/page_pool.h
net/core/page_pool.c

index ace881c15dcba888bcaf3ea3252b7a58029014b7..e2e1b7b1e8ba1697c8b3928b96671e3366c064f9 100644 (file)
@@ -204,4 +204,11 @@ static inline bool page_pool_put(struct page_pool *pool)
        return refcount_dec_and_test(&pool->user_cnt);
 }
 
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+       if (unlikely(pool->p.nid != new_nid))
+               page_pool_update_nid(pool, new_nid);
+}
 #endif /* _NET_PAGE_POOL_H */
index 2f2a10e8eb56ff5a32a9a83d37e25cfa0a91f3e8..ad0aa7f316755005067308363131d492a0194eb3 100644 (file)
@@ -89,6 +89,28 @@ TRACE_EVENT(page_pool_state_hold,
                  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
 );
 
+TRACE_EVENT(page_pool_update_nid,
+
+       TP_PROTO(const struct page_pool *pool, int new_nid),
+
+       TP_ARGS(pool, new_nid),
+
+       TP_STRUCT__entry(
+               __field(const struct page_pool *, pool)
+               __field(int,                      pool_nid)
+               __field(int,                      new_nid)
+       ),
+
+       TP_fast_assign(
+               __entry->pool           = pool;
+               __entry->pool_nid       = pool->p.nid;
+               __entry->new_nid        = new_nid;
+       ),
+
+       TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
+                 __entry->pool, __entry->pool_nid, __entry->new_nid)
+);
+
 #endif /* _TRACE_PAGE_POOL_H */
 
 /* This part must be outside protection */
index e28db2ef8e12ae952b016daa1c50953c48fd4531..9b704ea3f4b2104431f511f2f71eb08ceaed925d 100644 (file)
@@ -436,3 +436,11 @@ void page_pool_destroy(struct page_pool *pool)
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
 }
 EXPORT_SYMBOL(page_pool_destroy);
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid)
+{
+       trace_page_pool_update_nid(pool, new_nid);
+       pool->p.nid = new_nid;
+}
+EXPORT_SYMBOL(page_pool_update_nid);