// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

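/* Prepare the default RSS configuration: a fixed hash secret key and an
 * indirection table that spreads entries evenly over num_rss_queues.
 */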
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * the link status IRQ. If not, the link state will be
	 * picked up by the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
}

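/* Query firmware for the current link state and, on a change, sync carrier,
 * TX queues, interrupt moderation and flow control settings with it.
 */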
131static int aq_nic_update_link_status(struct aq_nic_s *self)
132{
0c58c35f 133 int err = self->aq_fw_ops->update_link_status(self->aq_hw);
35e8e8b4 134 u32 fc = 0;
3aec6412
IR
135
136 if (err)
137 return err;
138
b82ee71a 139 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
3aec6412
IR
140 pr_info("%s: link change old %d new %d\n",
141 AQ_CFG_DRV_NAME, self->link_status.mbps,
142 self->aq_hw->aq_link_status.mbps);
b82ee71a 143 aq_nic_update_interrupt_moderation_settings(self);
35e8e8b4
IR
144
145 /* Driver has to update flow control settings on RX block
146 * on any link event.
147 * We should query FW whether it negotiated FC.
148 */
149 if (self->aq_fw_ops->get_flow_control)
150 self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
151 if (self->aq_hw_ops->hw_set_fc)
152 self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
b82ee71a 153 }
3aec6412
IR
154
155 self->link_status = self->aq_hw->aq_link_status;
156 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
78f5193d 157 aq_utils_obj_set(&self->flags,
3aec6412 158 AQ_NIC_FLAG_STARTED);
78f5193d 159 aq_utils_obj_clear(&self->flags,
3aec6412
IR
160 AQ_NIC_LINK_DOWN);
161 netif_carrier_on(self->ndev);
162 netif_tx_wake_all_queues(self->ndev);
163 }
164 if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
165 netif_carrier_off(self->ndev);
166 netif_tx_disable(self->ndev);
78f5193d 167 aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
3aec6412
IR
168 }
169 return 0;
170}
171
1d2a8a13
IR
172static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
173{
174 struct aq_nic_s *self = private;
175
176 if (!self)
177 return IRQ_NONE;
178
179 aq_nic_update_link_status(self);
180
181 self->aq_hw_ops->hw_irq_enable(self->aq_hw,
182 BIT(self->aq_nic_cfg.link_irq_vec));
183 return IRQ_HANDLED;
184}
185
49544935 186static void aq_nic_service_task(struct work_struct *work)
97bde5c4 187{
49544935
IR
188 struct aq_nic_s *self = container_of(work, struct aq_nic_s,
189 service_task);
190 int err;
97bde5c4 191
78f5193d 192 if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
49544935 193 return;
97bde5c4 194
3aec6412
IR
195 err = aq_nic_update_link_status(self);
196 if (err)
49544935 197 return;
97bde5c4 198
49544935 199 mutex_lock(&self->fwreq_mutex);
0c58c35f
IR
200 if (self->aq_fw_ops->update_stats)
201 self->aq_fw_ops->update_stats(self->aq_hw);
49544935 202 mutex_unlock(&self->fwreq_mutex);
65e665e6 203
9f8a2203 204 aq_nic_update_ndev_stats(self);
49544935
IR
205}
206
207static void aq_nic_service_timer_cb(struct timer_list *t)
208{
209 struct aq_nic_s *self = from_timer(self, t, service_timer);
97bde5c4 210
4c83f170 211 mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
49544935
IR
212
213 aq_ndev_schedule_work(&self->service_task);
97bde5c4
DV
214}
215
e99e88a9 216static void aq_nic_polling_timer_cb(struct timer_list *t)
97bde5c4 217{
e99e88a9 218 struct aq_nic_s *self = from_timer(self, t, polling_timer);
97bde5c4
DV
219 struct aq_vec_s *aq_vec = NULL;
220 unsigned int i = 0U;
221
222 for (i = 0U, aq_vec = self->aq_vec[0];
223 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
224 aq_vec_isr(i, (void *)aq_vec);
225
226 mod_timer(&self->polling_timer, jiffies +
e9157848 227 AQ_CFG_POLLING_TIMER_INTERVAL);
97bde5c4
DV
228}
229
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

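/* Map an skb into TX descriptors: an optional GSO context descriptor first,
 * then the linear part and each paged fragment (split to AQ_CFG_TX_FRAME_MAX).
 * Returns the number of descriptors used, or 0 on a DMA mapping error.
 */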
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

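/* Rebuild the unicast/multicast address list for the hardware filter.
 * Falls back to IFF_PROMISC/IFF_ALLMULTI when the list would exceed
 * AQ_HW_MULTICAST_ADDRESS_MAX entries.
 */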
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

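/* Fill the ethtool statistics buffer: firmware/MAC counters first, followed
 * by the per-vector software counters.
 */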
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
			break;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

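/* PM transition helper: on sleep/freeze events the NIC is stopped and
 * deinitialized; on resume it is reinitialized and restarted.
 */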
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}