git.proxmox.com Git - pve-kernel.git/blob - patches/kernel/0017-i40e-Fix-memory-leak-related-filter-programming-stat.patch
warn when non-RETPOLINED module gets loaded
[pve-kernel.git] / patches / kernel / 0017-i40e-Fix-memory-leak-related-filter-programming-stat.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Alexander Duyck <alexander.h.duyck@intel.com>
3 Date: Wed, 4 Oct 2017 08:44:43 -0700
4 Subject: [PATCH] i40e: Fix memory leak related filter programming status
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 It looks like we weren't correctly placing the pages from buffers that had
10 been used to return a filter programming status back on the ring. As a
11 result they were being overwritten and tracking of the pages was lost.
12
13 This change works to correct that by incorporating part of
14 i40e_put_rx_buffer into the programming status handler code. As a result we
15 should now be correctly placing the pages for those buffers on the
16 re-allocation list instead of letting them stay in place.
17
18 Fixes: 0e626ff7ccbf ("i40e: Fix support for flow director programming status")
19 Reported-by: Anders K. Pedersen <akp@cohaesio.com>
20 Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
21 Tested-by: Anders K Pedersen <akp@cohaesio.com>
22 Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
23 (cherry picked from commit 2b9478ffc550f17c6cd8c69057234e91150f5972)
24 Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
25 ---
26 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 63 ++++++++++++++++-------------
27 1 file changed, 36 insertions(+), 27 deletions(-)
28
29 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
30 index 2194960d5855..391b1878c24b 100644
31 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
32 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
33 @@ -1042,6 +1042,32 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
34 return false;
35 }
36
37 +/**
38 + * i40e_reuse_rx_page - page flip buffer and store it back on the ring
39 + * @rx_ring: rx descriptor ring to store buffers on
40 + * @old_buff: donor buffer to have page reused
41 + *
42 + * Synchronizes page for reuse by the adapter
43 + **/
44 +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
45 + struct i40e_rx_buffer *old_buff)
46 +{
47 + struct i40e_rx_buffer *new_buff;
48 + u16 nta = rx_ring->next_to_alloc;
49 +
50 + new_buff = &rx_ring->rx_bi[nta];
51 +
52 + /* update, and store next to alloc */
53 + nta++;
54 + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
55 +
56 + /* transfer page from old buffer to new buffer */
57 + new_buff->dma = old_buff->dma;
58 + new_buff->page = old_buff->page;
59 + new_buff->page_offset = old_buff->page_offset;
60 + new_buff->pagecnt_bias = old_buff->pagecnt_bias;
61 +}
62 +
63 /**
64 * i40e_rx_is_programming_status - check for programming status descriptor
65 * @qw: qword representing status_error_len in CPU ordering
66 @@ -1076,15 +1102,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
67 union i40e_rx_desc *rx_desc,
68 u64 qw)
69 {
70 - u32 ntc = rx_ring->next_to_clean + 1;
71 + struct i40e_rx_buffer *rx_buffer;
72 + u32 ntc = rx_ring->next_to_clean;
73 u8 id;
74
75 /* fetch, update, and store next to clean */
76 + rx_buffer = &rx_ring->rx_bi[ntc++];
77 ntc = (ntc < rx_ring->count) ? ntc : 0;
78 rx_ring->next_to_clean = ntc;
79
80 prefetch(I40E_RX_DESC(rx_ring, ntc));
81
82 + /* place unused page back on the ring */
83 + i40e_reuse_rx_page(rx_ring, rx_buffer);
84 + rx_ring->rx_stats.page_reuse_count++;
85 +
86 + /* clear contents of buffer_info */
87 + rx_buffer->page = NULL;
88 +
89 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
90 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
91
92 @@ -1643,32 +1678,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
93 return false;
94 }
95
96 -/**
97 - * i40e_reuse_rx_page - page flip buffer and store it back on the ring
98 - * @rx_ring: rx descriptor ring to store buffers on
99 - * @old_buff: donor buffer to have page reused
100 - *
101 - * Synchronizes page for reuse by the adapter
102 - **/
103 -static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
104 - struct i40e_rx_buffer *old_buff)
105 -{
106 - struct i40e_rx_buffer *new_buff;
107 - u16 nta = rx_ring->next_to_alloc;
108 -
109 - new_buff = &rx_ring->rx_bi[nta];
110 -
111 - /* update, and store next to alloc */
112 - nta++;
113 - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
114 -
115 - /* transfer page from old buffer to new buffer */
116 - new_buff->dma = old_buff->dma;
117 - new_buff->page = old_buff->page;
118 - new_buff->page_offset = old_buff->page_offset;
119 - new_buff->pagecnt_bias = old_buff->pagecnt_bias;
120 -}
121 -
122 /**
123 * i40e_page_is_reusable - check if any reuse is possible
124 * @page: page struct to check
125 --
126 2.14.2
127