/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
10 /* File aq_nic.c: Definition of common code for NIC. */
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
27 static void aq_nic_rss_init(struct aq_nic_s
*self
, unsigned int num_rss_queues
)
29 struct aq_nic_cfg_s
*cfg
= &self
->aq_nic_cfg
;
30 struct aq_rss_parameters
*rss_params
= &cfg
->aq_rss
;
33 static u8 rss_key
[40] = {
34 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
35 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
36 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
37 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
38 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
41 rss_params
->hash_secret_key_size
= sizeof(rss_key
);
42 memcpy(rss_params
->hash_secret_key
, rss_key
, sizeof(rss_key
));
43 rss_params
->indirection_table_size
= AQ_CFG_RSS_INDIRECTION_TABLE_MAX
;
45 for (i
= rss_params
->indirection_table_size
; i
--;)
46 rss_params
->indirection_table
[i
] = i
& (num_rss_queues
- 1);
49 /* Fills aq_nic_cfg with valid defaults */
50 static void aq_nic_cfg_init_defaults(struct aq_nic_s
*self
)
52 struct aq_nic_cfg_s
*cfg
= &self
->aq_nic_cfg
;
54 cfg
->aq_hw_caps
= &self
->aq_hw_caps
;
56 cfg
->vecs
= AQ_CFG_VECS_DEF
;
57 cfg
->tcs
= AQ_CFG_TCS_DEF
;
59 cfg
->rxds
= AQ_CFG_RXDS_DEF
;
60 cfg
->txds
= AQ_CFG_TXDS_DEF
;
62 cfg
->is_polling
= AQ_CFG_IS_POLLING_DEF
;
64 cfg
->is_interrupt_moderation
= AQ_CFG_IS_INTERRUPT_MODERATION_DEF
;
65 cfg
->itr
= cfg
->is_interrupt_moderation
?
66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF
: 0U;
68 cfg
->is_rss
= AQ_CFG_IS_RSS_DEF
;
69 cfg
->num_rss_queues
= AQ_CFG_NUM_RSS_QUEUES_DEF
;
70 cfg
->aq_rss
.base_cpu_number
= AQ_CFG_RSS_BASE_CPU_NUM_DEF
;
71 cfg
->flow_control
= AQ_CFG_FC_MODE
;
73 cfg
->mtu
= AQ_CFG_MTU_DEF
;
74 cfg
->link_speed_msk
= AQ_CFG_SPEED_MSK
;
75 cfg
->is_autoneg
= AQ_CFG_IS_AUTONEG_DEF
;
77 cfg
->is_lro
= AQ_CFG_IS_LRO_DEF
;
81 aq_nic_rss_init(self
, cfg
->num_rss_queues
);
84 /* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
85 int aq_nic_cfg_start(struct aq_nic_s
*self
)
87 struct aq_nic_cfg_s
*cfg
= &self
->aq_nic_cfg
;
90 cfg
->rxds
= min(cfg
->rxds
, cfg
->aq_hw_caps
->rxds
);
91 cfg
->txds
= min(cfg
->txds
, cfg
->aq_hw_caps
->txds
);
94 cfg
->vecs
= min(cfg
->vecs
, cfg
->aq_hw_caps
->vecs
);
95 cfg
->vecs
= min(cfg
->vecs
, num_online_cpus());
96 /* cfg->vecs should be power of 2 for RSS */
99 else if (cfg
->vecs
>= 4U)
101 else if (cfg
->vecs
>= 2U)
106 cfg
->irq_type
= aq_pci_func_get_irq_type(self
->aq_pci_func
);
108 if ((cfg
->irq_type
== AQ_HW_IRQ_LEGACY
) ||
109 (self
->aq_hw_caps
.vecs
== 1U) ||
115 cfg
->link_speed_msk
&= self
->aq_hw_caps
.link_speed_msk
;
116 cfg
->hw_features
= self
->aq_hw_caps
.hw_features
;
120 static void aq_nic_service_timer_cb(unsigned long param
)
122 struct aq_nic_s
*self
= (struct aq_nic_s
*)param
;
123 struct net_device
*ndev
= aq_nic_get_ndev(self
);
126 struct aq_hw_link_status_s link_status
;
127 struct aq_ring_stats_rx_s stats_rx
;
128 struct aq_ring_stats_tx_s stats_tx
;
130 if (aq_utils_obj_test(&self
->header
.flags
, AQ_NIC_FLAGS_IS_NOT_READY
))
133 err
= self
->aq_hw_ops
.hw_get_link_status(self
->aq_hw
, &link_status
);
137 self
->aq_hw_ops
.hw_interrupt_moderation_set(self
->aq_hw
,
138 self
->aq_nic_cfg
.is_interrupt_moderation
);
140 if (memcmp(&link_status
, &self
->link_status
, sizeof(link_status
))) {
141 if (link_status
.mbps
) {
142 aq_utils_obj_set(&self
->header
.flags
,
143 AQ_NIC_FLAG_STARTED
);
144 aq_utils_obj_clear(&self
->header
.flags
,
146 netif_carrier_on(self
->ndev
);
148 netif_carrier_off(self
->ndev
);
149 aq_utils_obj_set(&self
->header
.flags
, AQ_NIC_LINK_DOWN
);
152 self
->link_status
= link_status
;
155 memset(&stats_rx
, 0U, sizeof(struct aq_ring_stats_rx_s
));
156 memset(&stats_tx
, 0U, sizeof(struct aq_ring_stats_tx_s
));
157 for (i
= AQ_DIMOF(self
->aq_vec
); i
--;) {
159 aq_vec_add_stats(self
->aq_vec
[i
], &stats_rx
, &stats_tx
);
162 ndev
->stats
.rx_packets
= stats_rx
.packets
;
163 ndev
->stats
.rx_bytes
= stats_rx
.bytes
;
164 ndev
->stats
.rx_errors
= stats_rx
.errors
;
165 ndev
->stats
.tx_packets
= stats_tx
.packets
;
166 ndev
->stats
.tx_bytes
= stats_tx
.bytes
;
167 ndev
->stats
.tx_errors
= stats_tx
.errors
;
170 mod_timer(&self
->service_timer
,
171 jiffies
+ AQ_CFG_SERVICE_TIMER_INTERVAL
);
174 static void aq_nic_polling_timer_cb(unsigned long param
)
176 struct aq_nic_s
*self
= (struct aq_nic_s
*)param
;
177 struct aq_vec_s
*aq_vec
= NULL
;
180 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
181 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
182 aq_vec_isr(i
, (void *)aq_vec
);
184 mod_timer(&self
->polling_timer
, jiffies
+
185 AQ_CFG_POLLING_TIMER_INTERVAL
);
188 static struct net_device
*aq_nic_ndev_alloc(void)
190 return alloc_etherdev_mq(sizeof(struct aq_nic_s
), AQ_CFG_VECS_MAX
);
193 struct aq_nic_s
*aq_nic_alloc_cold(const struct net_device_ops
*ndev_ops
,
194 const struct ethtool_ops
*et_ops
,
196 struct aq_pci_func_s
*aq_pci_func
,
198 const struct aq_hw_ops
*aq_hw_ops
)
200 struct net_device
*ndev
= NULL
;
201 struct aq_nic_s
*self
= NULL
;
204 ndev
= aq_nic_ndev_alloc();
210 self
= netdev_priv(ndev
);
212 ndev
->netdev_ops
= ndev_ops
;
213 ndev
->ethtool_ops
= et_ops
;
215 SET_NETDEV_DEV(ndev
, dev
);
217 ndev
->if_port
= port
;
218 ndev
->min_mtu
= ETH_MIN_MTU
;
221 self
->aq_pci_func
= aq_pci_func
;
223 self
->aq_hw_ops
= *aq_hw_ops
;
224 self
->port
= (u8
)port
;
226 self
->aq_hw
= self
->aq_hw_ops
.create(aq_pci_func
, self
->port
,
228 err
= self
->aq_hw_ops
.get_hw_caps(self
->aq_hw
, &self
->aq_hw_caps
);
232 aq_nic_cfg_init_defaults(self
);
236 aq_nic_free_hot_resources(self
);
242 int aq_nic_ndev_register(struct aq_nic_s
*self
)
251 err
= self
->aq_hw_ops
.hw_get_mac_permanent(self
->aq_hw
,
252 self
->aq_nic_cfg
.aq_hw_caps
,
253 self
->ndev
->dev_addr
);
257 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
259 static u8 mac_addr_permanent
[] = AQ_CFG_MAC_ADDR_PERMANENT
;
261 ether_addr_copy(self
->ndev
->dev_addr
, mac_addr_permanent
);
265 netif_carrier_off(self
->ndev
);
267 for (i
= AQ_CFG_VECS_MAX
; i
--;)
268 aq_nic_ndev_queue_stop(self
, i
);
270 err
= register_netdev(self
->ndev
);
278 int aq_nic_ndev_init(struct aq_nic_s
*self
)
280 struct aq_hw_caps_s
*aq_hw_caps
= self
->aq_nic_cfg
.aq_hw_caps
;
281 struct aq_nic_cfg_s
*aq_nic_cfg
= &self
->aq_nic_cfg
;
283 self
->ndev
->hw_features
|= aq_hw_caps
->hw_features
;
284 self
->ndev
->features
= aq_hw_caps
->hw_features
;
285 self
->ndev
->priv_flags
= aq_hw_caps
->hw_priv_flags
;
286 self
->ndev
->mtu
= aq_nic_cfg
->mtu
- ETH_HLEN
;
291 void aq_nic_ndev_free(struct aq_nic_s
*self
)
296 if (self
->ndev
->reg_state
== NETREG_REGISTERED
)
297 unregister_netdev(self
->ndev
);
300 self
->aq_hw_ops
.destroy(self
->aq_hw
);
302 free_netdev(self
->ndev
);
307 struct aq_nic_s
*aq_nic_alloc_hot(struct net_device
*ndev
)
309 struct aq_nic_s
*self
= NULL
;
316 self
= netdev_priv(ndev
);
322 if (netif_running(ndev
)) {
325 for (i
= AQ_CFG_VECS_MAX
; i
--;)
326 netif_stop_subqueue(ndev
, i
);
329 for (self
->aq_vecs
= 0; self
->aq_vecs
< self
->aq_nic_cfg
.vecs
;
331 self
->aq_vec
[self
->aq_vecs
] =
332 aq_vec_alloc(self
, self
->aq_vecs
, &self
->aq_nic_cfg
);
333 if (!self
->aq_vec
[self
->aq_vecs
]) {
341 aq_nic_free_hot_resources(self
);
347 void aq_nic_set_tx_ring(struct aq_nic_s
*self
, unsigned int idx
,
348 struct aq_ring_s
*ring
)
350 self
->aq_ring_tx
[idx
] = ring
;
353 struct device
*aq_nic_get_dev(struct aq_nic_s
*self
)
355 return self
->ndev
->dev
.parent
;
358 struct net_device
*aq_nic_get_ndev(struct aq_nic_s
*self
)
363 int aq_nic_init(struct aq_nic_s
*self
)
365 struct aq_vec_s
*aq_vec
= NULL
;
369 self
->power_state
= AQ_HW_POWER_STATE_D0
;
370 err
= self
->aq_hw_ops
.hw_reset(self
->aq_hw
);
374 err
= self
->aq_hw_ops
.hw_init(self
->aq_hw
, &self
->aq_nic_cfg
,
375 aq_nic_get_ndev(self
)->dev_addr
);
379 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
380 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
381 aq_vec_init(aq_vec
, &self
->aq_hw_ops
, self
->aq_hw
);
387 void aq_nic_ndev_queue_start(struct aq_nic_s
*self
, unsigned int idx
)
389 netif_start_subqueue(self
->ndev
, idx
);
392 void aq_nic_ndev_queue_stop(struct aq_nic_s
*self
, unsigned int idx
)
394 netif_stop_subqueue(self
->ndev
, idx
);
397 int aq_nic_start(struct aq_nic_s
*self
)
399 struct aq_vec_s
*aq_vec
= NULL
;
403 err
= self
->aq_hw_ops
.hw_multicast_list_set(self
->aq_hw
,
405 self
->mc_list
.count
);
409 err
= self
->aq_hw_ops
.hw_packet_filter_set(self
->aq_hw
,
410 self
->packet_filter
);
414 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
415 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
]) {
416 err
= aq_vec_start(aq_vec
);
421 err
= self
->aq_hw_ops
.hw_start(self
->aq_hw
);
425 err
= self
->aq_hw_ops
.hw_interrupt_moderation_set(self
->aq_hw
,
426 self
->aq_nic_cfg
.is_interrupt_moderation
);
429 setup_timer(&self
->service_timer
, &aq_nic_service_timer_cb
,
430 (unsigned long)self
);
431 mod_timer(&self
->service_timer
, jiffies
+
432 AQ_CFG_SERVICE_TIMER_INTERVAL
);
434 if (self
->aq_nic_cfg
.is_polling
) {
435 setup_timer(&self
->polling_timer
, &aq_nic_polling_timer_cb
,
436 (unsigned long)self
);
437 mod_timer(&self
->polling_timer
, jiffies
+
438 AQ_CFG_POLLING_TIMER_INTERVAL
);
440 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
441 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
]) {
442 err
= aq_pci_func_alloc_irq(self
->aq_pci_func
, i
,
443 self
->ndev
->name
, aq_vec
,
444 aq_vec_get_affinity_mask(aq_vec
));
449 err
= self
->aq_hw_ops
.hw_irq_enable(self
->aq_hw
,
455 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
456 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
457 aq_nic_ndev_queue_start(self
, i
);
459 err
= netif_set_real_num_tx_queues(self
->ndev
, self
->aq_vecs
);
463 err
= netif_set_real_num_rx_queues(self
->ndev
, self
->aq_vecs
);
471 static unsigned int aq_nic_map_skb(struct aq_nic_s
*self
,
473 struct aq_ring_s
*ring
)
475 unsigned int ret
= 0U;
476 unsigned int nr_frags
= skb_shinfo(skb
)->nr_frags
;
477 unsigned int frag_count
= 0U;
478 unsigned int dx
= ring
->sw_tail
;
479 struct aq_ring_buff_s
*dx_buff
= &ring
->buff_ring
[dx
];
481 if (unlikely(skb_is_gso(skb
))) {
483 dx_buff
->len_pkt
= skb
->len
;
484 dx_buff
->len_l2
= ETH_HLEN
;
485 dx_buff
->len_l3
= ip_hdrlen(skb
);
486 dx_buff
->len_l4
= tcp_hdrlen(skb
);
487 dx_buff
->mss
= skb_shinfo(skb
)->gso_size
;
488 dx_buff
->is_txc
= 1U;
490 dx
= aq_ring_next_dx(ring
, dx
);
491 dx_buff
= &ring
->buff_ring
[dx
];
496 dx_buff
->len
= skb_headlen(skb
);
497 dx_buff
->pa
= dma_map_single(aq_nic_get_dev(self
),
502 if (unlikely(dma_mapping_error(aq_nic_get_dev(self
), dx_buff
->pa
)))
505 dx_buff
->len_pkt
= skb
->len
;
506 dx_buff
->is_sop
= 1U;
507 dx_buff
->is_mapped
= 1U;
510 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
511 dx_buff
->is_ip_cso
= (htons(ETH_P_IP
) == skb
->protocol
) ?
513 dx_buff
->is_tcp_cso
=
514 (ip_hdr(skb
)->protocol
== IPPROTO_TCP
) ? 1U : 0U;
515 dx_buff
->is_udp_cso
=
516 (ip_hdr(skb
)->protocol
== IPPROTO_UDP
) ? 1U : 0U;
519 for (; nr_frags
--; ++frag_count
) {
520 unsigned int frag_len
= 0U;
522 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_count
];
524 frag_len
= skb_frag_size(frag
);
525 frag_pa
= skb_frag_dma_map(aq_nic_get_dev(self
), frag
, 0,
526 frag_len
, DMA_TO_DEVICE
);
528 if (unlikely(dma_mapping_error(aq_nic_get_dev(self
), frag_pa
)))
531 while (frag_len
> AQ_CFG_TX_FRAME_MAX
) {
532 dx
= aq_ring_next_dx(ring
, dx
);
533 dx_buff
= &ring
->buff_ring
[dx
];
536 dx_buff
->len
= AQ_CFG_TX_FRAME_MAX
;
537 dx_buff
->pa
= frag_pa
;
538 dx_buff
->is_mapped
= 1U;
540 frag_len
-= AQ_CFG_TX_FRAME_MAX
;
541 frag_pa
+= AQ_CFG_TX_FRAME_MAX
;
545 dx
= aq_ring_next_dx(ring
, dx
);
546 dx_buff
= &ring
->buff_ring
[dx
];
549 dx_buff
->len
= frag_len
;
550 dx_buff
->pa
= frag_pa
;
551 dx_buff
->is_mapped
= 1U;
555 dx_buff
->is_eop
= 1U;
560 for (dx
= ring
->sw_tail
;
562 --ret
, dx
= aq_ring_next_dx(ring
, dx
)) {
563 dx_buff
= &ring
->buff_ring
[dx
];
565 if (!dx_buff
->is_txc
&& dx_buff
->pa
) {
566 if (unlikely(dx_buff
->is_sop
)) {
567 dma_unmap_single(aq_nic_get_dev(self
),
572 dma_unmap_page(aq_nic_get_dev(self
),
584 int aq_nic_xmit(struct aq_nic_s
*self
, struct sk_buff
*skb
)
585 __releases(&ring
->lock
)
586 __acquires(&ring
->lock
)
588 struct aq_ring_s
*ring
= NULL
;
589 unsigned int frags
= 0U;
590 unsigned int vec
= skb
->queue_mapping
% self
->aq_nic_cfg
.vecs
;
591 unsigned int tc
= 0U;
592 unsigned int trys
= AQ_CFG_LOCK_TRYS
;
593 int err
= NETDEV_TX_OK
;
594 bool is_nic_in_bad_state
;
596 frags
= skb_shinfo(skb
)->nr_frags
+ 1;
598 ring
= self
->aq_ring_tx
[AQ_NIC_TCVEC2RING(self
, tc
, vec
)];
600 if (frags
> AQ_CFG_SKB_FRAGS_MAX
) {
601 dev_kfree_skb_any(skb
);
605 is_nic_in_bad_state
= aq_utils_obj_test(&self
->header
.flags
,
606 AQ_NIC_FLAGS_IS_NOT_TX_READY
) ||
607 (aq_ring_avail_dx(ring
) <
608 AQ_CFG_SKB_FRAGS_MAX
);
610 if (is_nic_in_bad_state
) {
611 aq_nic_ndev_queue_stop(self
, ring
->idx
);
612 err
= NETDEV_TX_BUSY
;
617 if (spin_trylock(&ring
->header
.lock
)) {
618 frags
= aq_nic_map_skb(self
, skb
, ring
);
621 err
= self
->aq_hw_ops
.hw_ring_tx_xmit(
625 if (aq_ring_avail_dx(ring
) <
626 AQ_CFG_SKB_FRAGS_MAX
+ 1)
627 aq_nic_ndev_queue_stop(
631 ++ring
->stats
.tx
.packets
;
632 ring
->stats
.tx
.bytes
+= skb
->len
;
635 err
= NETDEV_TX_BUSY
;
638 spin_unlock(&ring
->header
.lock
);
644 err
= NETDEV_TX_BUSY
;
652 int aq_nic_set_packet_filter(struct aq_nic_s
*self
, unsigned int flags
)
656 err
= self
->aq_hw_ops
.hw_packet_filter_set(self
->aq_hw
, flags
);
660 self
->packet_filter
= flags
;
666 int aq_nic_set_multicast_list(struct aq_nic_s
*self
, struct net_device
*ndev
)
668 struct netdev_hw_addr
*ha
= NULL
;
671 self
->mc_list
.count
= 0U;
673 netdev_for_each_mc_addr(ha
, ndev
) {
674 ether_addr_copy(self
->mc_list
.ar
[i
++], ha
->addr
);
675 ++self
->mc_list
.count
;
678 return self
->aq_hw_ops
.hw_multicast_list_set(self
->aq_hw
,
680 self
->mc_list
.count
);
683 int aq_nic_set_mtu(struct aq_nic_s
*self
, int new_mtu
)
687 if (new_mtu
> self
->aq_hw_caps
.mtu
) {
691 self
->aq_nic_cfg
.mtu
= new_mtu
;
697 int aq_nic_set_mac(struct aq_nic_s
*self
, struct net_device
*ndev
)
699 return self
->aq_hw_ops
.hw_set_mac_address(self
->aq_hw
, ndev
->dev_addr
);
702 unsigned int aq_nic_get_link_speed(struct aq_nic_s
*self
)
704 return self
->link_status
.mbps
;
707 int aq_nic_get_regs(struct aq_nic_s
*self
, struct ethtool_regs
*regs
, void *p
)
714 err
= self
->aq_hw_ops
.hw_get_regs(self
->aq_hw
,
715 &self
->aq_hw_caps
, regs_buff
);
723 int aq_nic_get_regs_count(struct aq_nic_s
*self
)
725 return self
->aq_hw_caps
.mac_regs_count
;
728 void aq_nic_get_stats(struct aq_nic_s
*self
, u64
*data
)
730 struct aq_vec_s
*aq_vec
= NULL
;
732 unsigned int count
= 0U;
735 err
= self
->aq_hw_ops
.hw_get_hw_stats(self
->aq_hw
, data
, &count
);
742 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
743 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
]) {
745 aq_vec_get_sw_stats(aq_vec
, data
, &count
);
752 void aq_nic_get_link_ksettings(struct aq_nic_s
*self
,
753 struct ethtool_link_ksettings
*cmd
)
755 cmd
->base
.port
= PORT_TP
;
756 /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
757 cmd
->base
.duplex
= DUPLEX_FULL
;
758 cmd
->base
.autoneg
= self
->aq_nic_cfg
.is_autoneg
;
760 ethtool_link_ksettings_zero_link_mode(cmd
, supported
);
762 if (self
->aq_hw_caps
.link_speed_msk
& AQ_NIC_RATE_10G
)
763 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
766 if (self
->aq_hw_caps
.link_speed_msk
& AQ_NIC_RATE_5G
)
767 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
770 if (self
->aq_hw_caps
.link_speed_msk
& AQ_NIC_RATE_2GS
)
771 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
774 if (self
->aq_hw_caps
.link_speed_msk
& AQ_NIC_RATE_1G
)
775 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
778 if (self
->aq_hw_caps
.link_speed_msk
& AQ_NIC_RATE_100M
)
779 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
782 if (self
->aq_hw_caps
.flow_control
)
783 ethtool_link_ksettings_add_link_mode(cmd
, supported
,
786 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Autoneg
);
787 ethtool_link_ksettings_add_link_mode(cmd
, supported
, TP
);
789 ethtool_link_ksettings_zero_link_mode(cmd
, advertising
);
791 if (self
->aq_nic_cfg
.is_autoneg
)
792 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, Autoneg
);
794 if (self
->aq_nic_cfg
.link_speed_msk
& AQ_NIC_RATE_10G
)
795 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
798 if (self
->aq_nic_cfg
.link_speed_msk
& AQ_NIC_RATE_5G
)
799 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
802 if (self
->aq_nic_cfg
.link_speed_msk
& AQ_NIC_RATE_2GS
)
803 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
806 if (self
->aq_nic_cfg
.link_speed_msk
& AQ_NIC_RATE_1G
)
807 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
810 if (self
->aq_nic_cfg
.link_speed_msk
& AQ_NIC_RATE_100M
)
811 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
814 if (self
->aq_nic_cfg
.flow_control
)
815 ethtool_link_ksettings_add_link_mode(cmd
, advertising
,
818 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, TP
);
821 int aq_nic_set_link_ksettings(struct aq_nic_s
*self
,
822 const struct ethtool_link_ksettings
*cmd
)
828 if (cmd
->base
.autoneg
== AUTONEG_ENABLE
) {
829 rate
= self
->aq_hw_caps
.link_speed_msk
;
830 self
->aq_nic_cfg
.is_autoneg
= true;
832 speed
= cmd
->base
.speed
;
836 rate
= AQ_NIC_RATE_100M
;
840 rate
= AQ_NIC_RATE_1G
;
844 rate
= AQ_NIC_RATE_2GS
;
848 rate
= AQ_NIC_RATE_5G
;
852 rate
= AQ_NIC_RATE_10G
;
860 if (!(self
->aq_hw_caps
.link_speed_msk
& rate
)) {
865 self
->aq_nic_cfg
.is_autoneg
= false;
868 err
= self
->aq_hw_ops
.hw_set_link_speed(self
->aq_hw
, rate
);
872 self
->aq_nic_cfg
.link_speed_msk
= rate
;
878 struct aq_nic_cfg_s
*aq_nic_get_cfg(struct aq_nic_s
*self
)
880 return &self
->aq_nic_cfg
;
883 u32
aq_nic_get_fw_version(struct aq_nic_s
*self
)
887 self
->aq_hw_ops
.hw_get_fw_version(self
->aq_hw
, &fw_version
);
892 int aq_nic_stop(struct aq_nic_s
*self
)
894 struct aq_vec_s
*aq_vec
= NULL
;
897 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
898 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
899 aq_nic_ndev_queue_stop(self
, i
);
901 del_timer_sync(&self
->service_timer
);
903 self
->aq_hw_ops
.hw_irq_disable(self
->aq_hw
, AQ_CFG_IRQ_MASK
);
905 if (self
->aq_nic_cfg
.is_polling
)
906 del_timer_sync(&self
->polling_timer
);
908 aq_pci_func_free_irqs(self
->aq_pci_func
);
910 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
911 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
914 return self
->aq_hw_ops
.hw_stop(self
->aq_hw
);
917 void aq_nic_deinit(struct aq_nic_s
*self
)
919 struct aq_vec_s
*aq_vec
= NULL
;
925 for (i
= 0U, aq_vec
= self
->aq_vec
[0];
926 self
->aq_vecs
> i
; ++i
, aq_vec
= self
->aq_vec
[i
])
927 aq_vec_deinit(aq_vec
);
929 if (self
->power_state
== AQ_HW_POWER_STATE_D0
) {
930 (void)self
->aq_hw_ops
.hw_deinit(self
->aq_hw
);
932 (void)self
->aq_hw_ops
.hw_set_power(self
->aq_hw
,
939 void aq_nic_free_hot_resources(struct aq_nic_s
*self
)
946 for (i
= AQ_DIMOF(self
->aq_vec
); i
--;) {
948 aq_vec_free(self
->aq_vec
[i
]);
954 int aq_nic_change_pm_state(struct aq_nic_s
*self
, pm_message_t
*pm_msg
)
958 if (!netif_running(self
->ndev
)) {
963 if (pm_msg
->event
& PM_EVENT_SLEEP
|| pm_msg
->event
& PM_EVENT_FREEZE
) {
964 self
->power_state
= AQ_HW_POWER_STATE_D3
;
965 netif_device_detach(self
->ndev
);
966 netif_tx_stop_all_queues(self
->ndev
);
968 err
= aq_nic_stop(self
);
974 err
= aq_nic_init(self
);
978 err
= aq_nic_start(self
);
982 netif_device_attach(self
->ndev
);
983 netif_tx_start_all_queues(self
->ndev
);