// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

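/* Unmap an Rx page from the device and drop the ring's reference to it. */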
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

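/* Allocate a page of the requested order and DMA-map it for receive.
 * Returns 0 on success, -ENOMEM if the allocation or the mapping fails.
 */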
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	dma_addr_t daddr;
	int ret = -ENOMEM;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

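/* Provide a backing page for an Rx buffer. An existing page is reused by
 * flipping to the next AQ_CFG_RX_FRAME_MAX chunk while other references
 * are still outstanding, or by rewinding pg_off once the ring is the only
 * remaining user; a page exhausted while still shared is released and a
 * new one allocated below.
 */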
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

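/* Common allocation path for Tx and Rx rings: the software buffer ring and
 * the DMA-coherent descriptor area.
 */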
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

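/* The Rx constructor additionally derives the page order from the ratio of
 * AQ_CFG_RX_FRAME_MAX to PAGE_SIZE, honouring a larger rxpageorder from the
 * NIC configuration.
 */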
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}

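/* Return true if descriptor index @i lies strictly between head @h and
 * tail @t, taking ring wrap-around into account.
 */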
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

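/* Stop the Tx queue when descriptor space runs low and wake it again once
 * enough descriptors have been reclaimed.
 */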
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

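/* Reclaim completed Tx descriptors: unmap their DMA buffers, free the skb
 * on the last fragment and advance sw_head, limited to
 * AQ_CFG_TX_CLEAN_BUDGET descriptors per call. Returns true if the budget
 * was not exhausted.
 */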
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

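/* Propagate hardware checksum-offload results for a received buffer into
 * the skb, falling back to CHECKSUM_NONE on a reported checksum error or
 * when the IP header was not validated.
 */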
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

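/* Process completed Rx descriptors and pass packets to the stack. Frames
 * that fit in a single fragment are wrapped with build_skb(); larger or
 * multi-descriptor (RSC) packets get their header copied into a new skb
 * and the remaining data attached as page fragments. Cleaning stops early
 * if an RSC chain has not fully completed yet.
 */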
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next,
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error) {
				buff_ = buff;
				do {
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;

				} while (!buff_->is_eop);
			}
		}

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

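/* Refill empty Rx descriptors with freshly prepared pages. Refilling is
 * skipped while the number of free descriptors is below the refill
 * threshold.
 */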
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

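/* Release the Rx pages still held by descriptors between sw_head and
 * sw_tail.
 */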
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}

err_exit:;
}

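/* Free the software buffer ring and the DMA-coherent descriptor memory. */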
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);

err_exit:;
}