/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

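/* Common allocation path for both ring flavours: the software bookkeeping
 * array (buff_ring) is plain kernel memory, while the hardware descriptor
 * ring (dx_ring) must live in DMA-coherent memory shared with the NIC.
 */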
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

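/* The Tx and Rx constructors below differ only in which configuration
 * fields they copy (txds/txd_size vs. rxds/rxd_size) before handing off
 * to aq_ring_alloc().
 */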
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

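/* Reset the ring indices: hw_head tracks the device's write position,
 * sw_head/sw_tail track the driver's consume/produce positions.
 */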
int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}

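/* Return true when descriptor index i lies strictly between head h and
 * tail t on the circular ring, including the wrapped case where h >= t.
 */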
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

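/* Tx flow control: stop the queue while too few descriptors remain to
 * hold a maximally fragmented skb, wake it once completions have freed
 * enough of them again.
 */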
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

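/* Thin wrappers around the netdev subqueue API for this ring's Tx queue. */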
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

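/* Reclaim completed Tx descriptors between sw_head and hw_head, unmapping
 * DMA and freeing the skb on the last fragment of each packet.  Returns
 * true when the whole backlog was processed within the budget.
 */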
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;

	for (; self->sw_head != self->hw_head && budget;
	     --budget, self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				/* Do not unmap a packet whose last fragment
				 * the hardware has not written back yet.
				 */
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
	}

	/* A non-zero remainder means the budget was not exhausted. */
	return !!budget;
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
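/* NAPI receive path.  Single-fragment packets become an skb built with
 * build_skb() directly on the DMA page; multi-fragment packets (e.g.
 * hardware RSC aggregations) get a small linear header copy plus page
 * fragments.  Aggregations not yet fully written back are left on the
 * ring for the next poll.
 */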
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;
	bool is_rsc_completed = true;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		struct aq_ring_buff_s *buff_ = NULL;

		if (buff->is_error) {
			__free_pages(buff->page, 0);
			continue;
		}

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			/* Walk the descriptor chain; bail out if the hardware
			 * has not yet written back the whole aggregation.
			 */
			for (next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_]) {
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				if (buff_->is_eop)
					break;
			}

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
		}

		/* for single fragment packets use build_skb() */
		if (buff->is_eop) {
			skb = build_skb(page_address(buff->page),
					buff->len + AQ_SKB_ALIGN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			skb_put(skb, buff->len);
		} else {
			skb = netdev_alloc_skb(ndev, ETH_HLEN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, ETH_HLEN);
			memcpy(skb->data, page_address(buff->page), ETH_HLEN);

			skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
					buff->len - ETH_HLEN,
					SKB_TRUESIZE(buff->len - ETH_HLEN));

			for (i = 1U, next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_], ++i) {
				/* account for each fragment's own length */
				skb_add_rx_frag(skb, i, buff_->page, 0,
						buff_->len,
						SKB_TRUESIZE(buff_->len));
				buff_->is_cleaned = 1;

				if (buff_->is_eop)
					break;
			}
		}

		skb->protocol = eth_type_trans(skb, ndev);
		if (unlikely(buff->is_cso_err)) {
			++self->stats.rx.errors;
			skb->ip_summed = CHECKSUM_NONE;
		} else {
			if (buff->is_ip_cso) {
				__skb_incr_checksum_unnecessary(skb);
				if (buff->is_udp_cso || buff->is_tcp_cso)
					__skb_incr_checksum_unnecessary(skb);
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
		}

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

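/* Refill all free Rx descriptors with freshly mapped pages.  pages_order
 * is the smallest page order that fits one receive frame, e.g. a 16 KiB
 * AQ_CFG_RX_FRAME_MAX with 4 KiB pages gives fls(16/4 + 0) - 1 = 2.
 */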
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
		if (!buff->page) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
					buff->page, 0,
					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff = NULL;
	}

err_exit:
	if (err < 0) {
		if (buff && buff->page)
			__free_pages(buff->page, 0);
	}

	return err;
}

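/* Release every page still owned by the ring between sw_head and sw_tail,
 * undoing the mappings made by aq_ring_rx_fill().
 */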
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

		__free_pages(buff->page, 0);
	}

err_exit:;
}

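/* Counterpart of aq_ring_alloc(): frees the bookkeeping array and, if it
 * was allocated, the DMA-coherent descriptor ring.
 */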
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);

err_exit:;
}