]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
711d7fd56d2640cb7f9eac46fd09a87ceae411af
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / broadcom / bnxt / bnxt_ethtool.c
1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/etherdevice.h>
17 #include <linux/crc32.h>
18 #include <linux/firmware.h>
19 #include "bnxt_hsi.h"
20 #include "bnxt.h"
21 #include "bnxt_ethtool.h"
22 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
23 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
24 #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
25 #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
26 #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
27
28 static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
29
30 static u32 bnxt_get_msglevel(struct net_device *dev)
31 {
32 struct bnxt *bp = netdev_priv(dev);
33
34 return bp->msg_enable;
35 }
36
37 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
38 {
39 struct bnxt *bp = netdev_priv(dev);
40
41 bp->msg_enable = value;
42 }
43
/* ethtool -c handler: report the currently configured interrupt and
 * statistics coalescing parameters.  Always succeeds.
 */
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(*coal));

	coal->rx_coalesce_usecs = bp->rx_coal_ticks;
	/* 2 completion records per rx packet, so halve bufs -> frames */
	coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
	coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;

	/* TX side is 1:1 between completion records and frames. */
	coal->tx_coalesce_usecs = bp->tx_coal_ticks;
	coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
	coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}
66
/* ethtool -C handler: apply new interrupt/stats coalescing parameters.
 *
 * Returns 0 on success, or a negative errno from the firmware call or
 * from re-opening the NIC.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	int rc = 0;

	bp->rx_coal_ticks = coal->rx_coalesce_usecs;
	/* 2 completion records per rx packet */
	bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
	bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;

	bp->tx_coal_ticks = coal->tx_coalesce_usecs;
	bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
	bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Clamp to the supported range, then round down to the
		 * minimum granularity before storing.
		 */
		stats_ticks = clamp_t(u32, stats_ticks,
				      BNXT_MIN_STATS_COAL_TICKS,
				      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		update_stats = true;
	}

	if (netif_running(dev)) {
		if (update_stats) {
			/* A changed stats interval needs a close/open cycle
			 * to take effect; plain coalescing only needs a
			 * firmware message.
			 */
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
108
/* Number of per-completion-ring stats strings emitted by
 * bnxt_get_strings() / values emitted by bnxt_get_ethtool_stats().
 */
#define BNXT_NUM_STATS	21

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

/* Port statistics table: each entry pairs a u64-indexed offset into the
 * hw_rx_port_stats block with its ethtool string name.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
};

#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
199
200 static int bnxt_get_sset_count(struct net_device *dev, int sset)
201 {
202 struct bnxt *bp = netdev_priv(dev);
203
204 switch (sset) {
205 case ETH_SS_STATS: {
206 int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
207
208 if (bp->flags & BNXT_FLAG_PORT_STATS)
209 num_stats += BNXT_NUM_PORT_STATS;
210
211 return num_stats;
212 }
213 case ETH_SS_TEST:
214 if (!bp->num_tests)
215 return -EOPNOTSUPP;
216 return bp->num_tests;
217 default:
218 return -EOPNOTSUPP;
219 }
220 }
221
/* ethtool -S handler: copy per-completion-ring hardware stats (plus the
 * software rx_l4_csum_errors counter) into buf, followed by the port
 * stats block when the device exposes one.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
	/* number of u64 counters in one ring's hw stats block */
	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;

	memset(buf, 0, buf_size);

	if (!bp->bnapi)
		return;		/* rings not allocated; leave buffer zeroed */

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		/* software counter appended after the hardware block */
		buf[j++] = cpr->rx_l4_csum_errors;
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;

		/* offsets in bnxt_port_stats_arr are in u64 units */
		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(port_stats +
					       bnxt_port_stats_arr[i].offset));
		}
	}
}
254
/* ethtool string-set handler: emit per-ring stat names, port stat names,
 * and self-test names.  The per-ring name order must match the value
 * order produced by bnxt_get_ethtool_stats().
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i;

	switch (stringset) {
	/* The number of strings must match BNXT_NUM_STATS defined above. */
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			sprintf(buf, "[%d]: rx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_events", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_aborts", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
			buf += ETH_GSTRING_LEN;
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		break;
	case ETH_SS_TEST:
		/* test_info->string is a flat array of fixed-width names */
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
325
326 static void bnxt_get_ringparam(struct net_device *dev,
327 struct ethtool_ringparam *ering)
328 {
329 struct bnxt *bp = netdev_priv(dev);
330
331 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
332 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
333 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
334
335 ering->rx_pending = bp->rx_ring_size;
336 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
337 ering->tx_pending = bp->tx_ring_size;
338 }
339
/* ethtool -G handler: resize the rx/tx rings.
 *
 * The tx ring must hold more than MAX_SKB_FRAGS descriptors so that one
 * maximally-fragmented skb always fits.  The NIC is closed (without
 * releasing IRQs) around the resize when the interface is up.
 */
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	/* recompute derived ring parameters (agg ring size etc.) */
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
362
/* ethtool -l handler: report max and current channel counts.
 *
 * Max combined is queried with shared completion rings; max rx/tx with
 * dedicated rings.  With multiple TCs the tx rings are divided among
 * the traffic classes.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int max_rx_rings, max_tx_rings, tcs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		/* dedicated rings unavailable; only combined mode possible */
		max_rx_rings = 0;
		max_tx_rings = 0;
	}

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		/* Nitro A0 reserves one ring; hide it from the count */
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
395
/* ethtool -L handler: reconfigure the rx/tx/combined channel counts.
 *
 * Validates the request (combined vs. dedicated are mutually exclusive;
 * XDP requires combined mode), reserves the rings with firmware, then
 * closes and reopens the NIC around the reconfiguration when it is up.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports shared (combined) rings */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* one XDP tx ring per rx ring */
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* shared mode: one completion ring serves an rx/tx pair */
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	bp->num_stat_ctxs = bp->cp_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	}

	return rc;
}
486
487 #ifdef CONFIG_RFS_ACCEL
/* ETHTOOL_GRXCLSRLALL: fill rule_locs with the sw_ids of all installed
 * ntuple filters, up to the caller-supplied cmd->rule_cnt.  The hash
 * table is walked bucket by bucket under RCU.  Always returns 0.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	/* report how many ids were actually written */
	cmd->rule_cnt = j;
	return 0;
}
512
513 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
514 {
515 struct ethtool_rx_flow_spec *fs =
516 (struct ethtool_rx_flow_spec *)&cmd->fs;
517 struct bnxt_ntuple_filter *fltr;
518 struct flow_keys *fkeys;
519 int i, rc = -EINVAL;
520
521 if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
522 return rc;
523
524 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
525 struct hlist_head *head;
526
527 head = &bp->ntp_fltr_hash_tbl[i];
528 rcu_read_lock();
529 hlist_for_each_entry_rcu(fltr, head, hash) {
530 if (fltr->sw_id == fs->location)
531 goto fltr_found;
532 }
533 rcu_read_unlock();
534 }
535 return rc;
536
537 fltr_found:
538 fkeys = &fltr->fkeys;
539 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
540 if (fkeys->basic.ip_proto == IPPROTO_TCP)
541 fs->flow_type = TCP_V4_FLOW;
542 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
543 fs->flow_type = UDP_V4_FLOW;
544 else
545 goto fltr_err;
546
547 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
548 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
549
550 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
551 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
552
553 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
554 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
555
556 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
557 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
558 } else {
559 int i;
560
561 if (fkeys->basic.ip_proto == IPPROTO_TCP)
562 fs->flow_type = TCP_V6_FLOW;
563 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
564 fs->flow_type = UDP_V6_FLOW;
565 else
566 goto fltr_err;
567
568 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
569 fkeys->addrs.v6addrs.src;
570 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
571 fkeys->addrs.v6addrs.dst;
572 for (i = 0; i < 4; i++) {
573 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
574 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
575 }
576 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
577 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
578
579 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
580 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
581 }
582
583 fs->ring_cookie = fltr->rxq;
584 rc = 0;
585
586 fltr_err:
587 rcu_read_unlock();
588
589 return rc;
590 }
591 #endif
592
593 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
594 {
595 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
596 return RXH_IP_SRC | RXH_IP_DST;
597 return 0;
598 }
599
600 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
601 {
602 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
603 return RXH_IP_SRC | RXH_IP_DST;
604 return 0;
605 }
606
/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for the
 * given flow type.  TCP/UDP may add the 4-tuple bits; every v4/v6 flow
 * type also inherits the address-pair (2-tuple) bits via fallthrough.
 * Always returns 0.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
651
/* Field selections accepted by bnxt_srxfh(): full 4-tuple or addresses only. */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

/* ETHTOOL_SRXFH: set the RSS hash fields for a flow type.
 *
 * Only the exact 4-tuple, 2-tuple, or empty selections are accepted;
 * 4-tuple is valid for TCP always and for UDP only when the device
 * advertises UDP RSS capability.  If the resulting config differs, the
 * NIC is bounced (when up) to program it.  Returns 0 or -EINVAL.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* First adjust the per-protocol 4-tuple bits */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		/* 4-tuple hashing only makes sense for TCP/UDP */
		return -EINVAL;
	}

	/* Then the shared per-address-family 2-tuple bits */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
731
/* ethtool get_rxnfc dispatcher.  Filter-related sub-commands are only
 * compiled in with CONFIG_RFS_ACCEL; anything unhandled returns
 * -EOPNOTSUPP.
 */
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
769
770 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
771 {
772 struct bnxt *bp = netdev_priv(dev);
773 int rc;
774
775 switch (cmd->cmd) {
776 case ETHTOOL_SRXFH:
777 rc = bnxt_srxfh(bp, cmd);
778 break;
779
780 default:
781 rc = -EOPNOTSUPP;
782 break;
783 }
784 return rc;
785 }
786
/* Number of entries in the RSS indirection table exposed to ethtool. */
static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}
791
/* Size of the RSS hash key, in bytes, exposed to ethtool. */
static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}
796
797 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
798 u8 *hfunc)
799 {
800 struct bnxt *bp = netdev_priv(dev);
801 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
802 int i = 0;
803
804 if (hfunc)
805 *hfunc = ETH_RSS_HASH_TOP;
806
807 if (indir)
808 for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
809 indir[i] = le16_to_cpu(vnic->rss_table[i]);
810
811 if (key)
812 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
813
814 return 0;
815 }
816
/* ethtool -i handler: fill in driver name/version, firmware version
 * (with NVM package version appended when readable), and bus info.
 * The pkglog buffer is a scratch area for bnxt_get_pkgver() and is
 * freed before returning; allocation failure just omits the pkg info.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);
	char *pkglog;
	char *pkgver = NULL;

	pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
	if (pkglog)
		pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	/* leading digit check guards against garbage in the NVM log */
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		snprintf(info->fw_version, sizeof(info->fw_version) - 1,
			 "%s pkg %s", bp->fw_ver_str, pkgver);
	else
		strlcpy(info->fw_version, bp->fw_ver_str,
			sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
	kfree(pkglog);
}
844
845 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
846 {
847 struct bnxt *bp = netdev_priv(dev);
848
849 wol->supported = 0;
850 wol->wolopts = 0;
851 memset(&wol->sopass, 0, sizeof(wol->sopass));
852 if (bp->flags & BNXT_FLAG_WOL_CAP) {
853 wol->supported = WAKE_MAGIC;
854 if (bp->wol)
855 wol->wolopts = WAKE_MAGIC;
856 }
857 }
858
859 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
860 {
861 struct bnxt *bp = netdev_priv(dev);
862
863 if (wol->wolopts & ~WAKE_MAGIC)
864 return -EINVAL;
865
866 if (wol->wolopts & WAKE_MAGIC) {
867 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
868 return -EINVAL;
869 if (!bp->wol) {
870 if (bnxt_hwrm_alloc_wol_fltr(bp))
871 return -EBUSY;
872 bp->wol = 1;
873 }
874 } else {
875 if (bp->wol) {
876 if (bnxt_hwrm_free_wol_fltr(bp))
877 return -EBUSY;
878 bp->wol = 0;
879 }
880 }
881 return 0;
882 }
883
/* Convert firmware speed and pause bitmaps into the legacy ethtool
 * ADVERTISED_* mask.  Exported for use by other driver files.
 */
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	/* Pause encoding: both -> Pause; tx-only -> Asym_Pause;
	 * rx-only -> Pause | Asym_Pause (per the ethtool convention).
	 */
	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}
910
/* Translate a firmware speed mask (and pause bits) into ethtool
 * link-mode bits on the given lk_ksettings field ("supported",
 * "advertising", or "lp_advertising").  Kept as a macro because the
 * link-mode name is pasted into ethtool_link_ksettings_add_link_mode().
 */
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

/* Inverse of the above: collect the checked ethtool link modes on the
 * named lk_ksettings field back into a firmware speed mask.  Half-duplex
 * 100M/1G modes map onto the same firmware bit as full duplex.
 */
#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
}
968
969 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
970 struct ethtool_link_ksettings *lk_ksettings)
971 {
972 u16 fw_speeds = link_info->advertising;
973 u8 fw_pause = 0;
974
975 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
976 fw_pause = link_info->auto_pause_setting;
977
978 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
979 }
980
981 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
982 struct ethtool_link_ksettings *lk_ksettings)
983 {
984 u16 fw_speeds = link_info->lp_auto_link_speeds;
985 u8 fw_pause = 0;
986
987 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
988 fw_pause = link_info->lp_pause;
989
990 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
991 lp_advertising);
992 }
993
/* Populate the "supported" link modes from the firmware-reported speed
 * capabilities.  Pause/Asym_Pause are always reported as supported;
 * Autoneg only when the firmware advertises autoneg-capable speeds.
 */
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
}
1009
1010 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1011 {
1012 switch (fw_link_speed) {
1013 case BNXT_LINK_SPEED_100MB:
1014 return SPEED_100;
1015 case BNXT_LINK_SPEED_1GB:
1016 return SPEED_1000;
1017 case BNXT_LINK_SPEED_2_5GB:
1018 return SPEED_2500;
1019 case BNXT_LINK_SPEED_10GB:
1020 return SPEED_10000;
1021 case BNXT_LINK_SPEED_20GB:
1022 return SPEED_20000;
1023 case BNXT_LINK_SPEED_25GB:
1024 return SPEED_25000;
1025 case BNXT_LINK_SPEED_40GB:
1026 return SPEED_40000;
1027 case BNXT_LINK_SPEED_50GB:
1028 return SPEED_50000;
1029 default:
1030 return SPEED_UNKNOWN;
1031 }
1032 }
1033
/* ethtool get_link_ksettings handler: report supported/advertised link
 * modes, autoneg state, speed, duplex, and port type.  Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		/* link partner modes are only meaningful when link is up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		if (!netif_carrier_ok(dev))
			base->duplex = DUPLEX_UNKNOWN;
		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		else
			base->duplex = DUPLEX_HALF;
	} else {
		/* forced mode: report the requested speed/duplex */
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;

	return 0;
}
1093
1094 static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
1095 {
1096 struct bnxt *bp = netdev_priv(dev);
1097 struct bnxt_link_info *link_info = &bp->link_info;
1098 u16 support_spds = link_info->support_speeds;
1099 u32 fw_speed = 0;
1100
1101 switch (ethtool_speed) {
1102 case SPEED_100:
1103 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1104 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
1105 break;
1106 case SPEED_1000:
1107 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1108 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
1109 break;
1110 case SPEED_2500:
1111 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1112 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
1113 break;
1114 case SPEED_10000:
1115 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1116 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
1117 break;
1118 case SPEED_20000:
1119 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1120 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
1121 break;
1122 case SPEED_25000:
1123 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1124 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
1125 break;
1126 case SPEED_40000:
1127 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1128 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
1129 break;
1130 case SPEED_50000:
1131 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
1132 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
1133 break;
1134 default:
1135 netdev_err(dev, "unsupported speed!\n");
1136 break;
1137 }
1138 return fw_speed;
1139 }
1140
1141 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1142 {
1143 u16 fw_speed_mask = 0;
1144
1145 /* only support autoneg at speed 100, 1000, and 10000 */
1146 if (advertising & (ADVERTISED_100baseT_Full |
1147 ADVERTISED_100baseT_Half)) {
1148 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1149 }
1150 if (advertising & (ADVERTISED_1000baseT_Full |
1151 ADVERTISED_1000baseT_Half)) {
1152 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1153 }
1154 if (advertising & ADVERTISED_10000baseT_Full)
1155 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1156
1157 if (advertising & ADVERTISED_40000baseCR4_Full)
1158 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1159
1160 return fw_speed_mask;
1161 }
1162
/* ethtool set_link_ksettings handler.  Updates the cached requested
 * link settings (autoneg/advertised speeds, or a single forced speed)
 * and pushes them to the firmware if the interface is running.
 * Only permitted on a single-function PF.  Returns 0 or negative errno.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* An empty advertised mask means "advertise everything" */
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		/* BASE-T PHYs must autonegotiate; forced speed is invalid */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		/* Forced speed: clear all autoneg state */
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	return rc;
}
1223
1224 static void bnxt_get_pauseparam(struct net_device *dev,
1225 struct ethtool_pauseparam *epause)
1226 {
1227 struct bnxt *bp = netdev_priv(dev);
1228 struct bnxt_link_info *link_info = &bp->link_info;
1229
1230 if (BNXT_VF(bp))
1231 return;
1232 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
1233 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1234 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
1235 }
1236
/* ethtool set_pauseparam handler.  Updates the requested flow-control
 * state (autoneg pause or forced rx/tx pause) and applies it via
 * firmware if the interface is running.  Only permitted on a
 * single-function PF.  Returns 0 or negative errno.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		/* Pause autoneg requires speed autoneg to be on */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		/* Newer firmware has a dedicated AUTONEG_PAUSE setting */
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* rx/tx pause bits are OR'ed on top of the autoneg setting above */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}
1274
1275 static u32 bnxt_get_link(struct net_device *dev)
1276 {
1277 struct bnxt *bp = netdev_priv(dev);
1278
1279 /* TODO: handle MF, VF, driver close case */
1280 return bp->link_info.link_up;
1281 }
1282
1283 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1284 u16 ext, u16 *index, u32 *item_length,
1285 u32 *data_length);
1286
/* Write one NVM directory item via HWRM_NVM_WRITE.  The caller's data
 * is staged in a DMA-coherent bounce buffer because the firmware reads
 * it directly over PCIe.  Uses the extended FLASH_NVRAM_TIMEOUT since
 * flash programming is slow.  Returns 0 or negative errno.
 */
static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	return rc;
}
1324
/* Ask the firmware to (self-)reset the embedded processor that owns the
 * NVM item type just flashed, so the new image takes effect.  Boot code
 * and APE resets are deferred until the next PCIe reset.  Returns 0,
 * -EINVAL for item types with no associated processor, or a negative
 * errno from the HWRM request.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
1365
/* Validate and flash an APE-format firmware binary, then trigger the
 * corresponding processor reset.  Validation: item type must map to a
 * known code type, the file must carry the expected header signature,
 * code type and device family, and the trailing CRC32 must match.
 * Returns 0 or negative errno.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map NVM directory type to the code type expected in the header */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
1445
/* Validate and flash a microcode image.  Microcode files carry a
 * trailer (at the end of the file) with a signature, the target NVM
 * directory type, the trailer length, and a CRC32 over the rest of the
 * file; all four are checked before writing.  Returns 0 or negative
 * errno.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	/* The trailer must target the same NVM item type we are flashing */
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
	    sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}
1496
1497 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1498 {
1499 switch (dir_type) {
1500 case BNX_DIR_TYPE_CHIMP_PATCH:
1501 case BNX_DIR_TYPE_BOOTCODE:
1502 case BNX_DIR_TYPE_BOOTCODE_2:
1503 case BNX_DIR_TYPE_APE_FW:
1504 case BNX_DIR_TYPE_APE_PATCH:
1505 case BNX_DIR_TYPE_KONG_FW:
1506 case BNX_DIR_TYPE_KONG_PATCH:
1507 case BNX_DIR_TYPE_BONO_FW:
1508 case BNX_DIR_TYPE_BONO_PATCH:
1509 return true;
1510 }
1511
1512 return false;
1513 }
1514
1515 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
1516 {
1517 switch (dir_type) {
1518 case BNX_DIR_TYPE_AVS:
1519 case BNX_DIR_TYPE_EXP_ROM_MBA:
1520 case BNX_DIR_TYPE_PCIE:
1521 case BNX_DIR_TYPE_TSCF_UCODE:
1522 case BNX_DIR_TYPE_EXT_PHY:
1523 case BNX_DIR_TYPE_CCM:
1524 case BNX_DIR_TYPE_ISCSI_BOOT:
1525 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1526 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1527 return true;
1528 }
1529
1530 return false;
1531 }
1532
1533 static bool bnxt_dir_type_is_executable(u16 dir_type)
1534 {
1535 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
1536 bnxt_dir_type_is_other_exec_format(dir_type);
1537 }
1538
1539 static int bnxt_flash_firmware_from_file(struct net_device *dev,
1540 u16 dir_type,
1541 const char *filename)
1542 {
1543 const struct firmware *fw;
1544 int rc;
1545
1546 rc = request_firmware(&fw, filename, &dev->dev);
1547 if (rc != 0) {
1548 netdev_err(dev, "Error %d requesting firmware file: %s\n",
1549 rc, filename);
1550 return rc;
1551 }
1552 if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
1553 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
1554 else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
1555 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
1556 else
1557 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1558 0, 0, fw->data, fw->size);
1559 release_firmware(fw);
1560 return rc;
1561 }
1562
/* Flash a full firmware package: stage the file into the NVM "UPDATE"
 * staging area with HWRM_NVM_MODIFY, then ask the firmware to install
 * it with HWRM_NVM_INSTALL_UPDATE.  If the install fails because the
 * NVM is fragmented, retry once with the ALLOWED_TO_DEFRAG flag.
 * Returns 0, -ENOBUFS (no update area), -EFBIG (file too big),
 * -ENOMEM, -EOPNOTSUPP (install request failed), or -ENOPKG (firmware
 * rejected the package).
 */
static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename, u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	u32 item_len;
	u16 index;
	int rc;

	bnxt_hwrm_fw_set_time(bp);

	/* Locate the pre-created UPDATE staging area in NVM */
	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		/* Firmware reads the package via DMA from a bounce buffer */
		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			rc = hwrm_send_message(bp, &modify, sizeof(modify),
					       FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc)
		return rc;

	/* The install type may be encoded in either half of the word */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	/* Hold the cmd lock across send + response inspection */
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &install, sizeof(install),
				INSTALL_PACKAGE_TIMEOUT);
	if (rc) {
		rc = -EOPNOTSUPP;
		goto flash_pkg_exit;
	}

	if (resp->error_code) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		/* NVM fragmentation: retry once allowing defragmentation */
		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			rc = _hwrm_send_message(bp, &install, sizeof(install),
						INSTALL_PACKAGE_TIMEOUT);
			if (rc) {
				rc = -EOPNOTSUPP;
				goto flash_pkg_exit;
			}
		}
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
1662
1663 static int bnxt_flash_device(struct net_device *dev,
1664 struct ethtool_flash *flash)
1665 {
1666 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
1667 netdev_err(dev, "flashdev not supported from a virtual function\n");
1668 return -EINVAL;
1669 }
1670
1671 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
1672 flash->region > 0xffff)
1673 return bnxt_flash_package_from_file(dev, flash->data,
1674 flash->region);
1675
1676 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
1677 }
1678
1679 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1680 {
1681 struct bnxt *bp = netdev_priv(dev);
1682 int rc;
1683 struct hwrm_nvm_get_dir_info_input req = {0};
1684 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
1685
1686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
1687
1688 mutex_lock(&bp->hwrm_cmd_lock);
1689 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1690 if (!rc) {
1691 *entries = le32_to_cpu(output->entries);
1692 *length = le32_to_cpu(output->entry_length);
1693 }
1694 mutex_unlock(&bp->hwrm_cmd_lock);
1695 return rc;
1696 }
1697
/* ethtool get_eeprom_len handler for the NVM-backed "EEPROM". */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
1705
1706 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1707 {
1708 struct bnxt *bp = netdev_priv(dev);
1709 int rc;
1710 u32 dir_entries;
1711 u32 entry_length;
1712 u8 *buf;
1713 size_t buflen;
1714 dma_addr_t dma_handle;
1715 struct hwrm_nvm_get_dir_entries_input req = {0};
1716
1717 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
1718 if (rc != 0)
1719 return rc;
1720
1721 /* Insert 2 bytes of directory info (count and size of entries) */
1722 if (len < 2)
1723 return -EINVAL;
1724
1725 *data++ = dir_entries;
1726 *data++ = entry_length;
1727 len -= 2;
1728 memset(data, 0xff, len);
1729
1730 buflen = dir_entries * entry_length;
1731 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
1732 GFP_KERNEL);
1733 if (!buf) {
1734 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1735 (unsigned)buflen);
1736 return -ENOMEM;
1737 }
1738 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
1739 req.host_dest_addr = cpu_to_le64(dma_handle);
1740 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1741 if (rc == 0)
1742 memcpy(data, buf, len > buflen ? buflen : len);
1743 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
1744 return rc;
1745 }
1746
1747 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
1748 u32 length, u8 *data)
1749 {
1750 struct bnxt *bp = netdev_priv(dev);
1751 int rc;
1752 u8 *buf;
1753 dma_addr_t dma_handle;
1754 struct hwrm_nvm_read_input req = {0};
1755
1756 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
1757 GFP_KERNEL);
1758 if (!buf) {
1759 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1760 (unsigned)length);
1761 return -ENOMEM;
1762 }
1763 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
1764 req.host_dest_addr = cpu_to_le64(dma_handle);
1765 req.dir_idx = cpu_to_le16(index);
1766 req.offset = cpu_to_le32(offset);
1767 req.len = cpu_to_le32(length);
1768
1769 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1770 if (rc == 0)
1771 memcpy(data, buf, length);
1772 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
1773 return rc;
1774 }
1775
1776 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1777 u16 ext, u16 *index, u32 *item_length,
1778 u32 *data_length)
1779 {
1780 struct bnxt *bp = netdev_priv(dev);
1781 int rc;
1782 struct hwrm_nvm_find_dir_entry_input req = {0};
1783 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
1784
1785 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
1786 req.enables = 0;
1787 req.dir_idx = 0;
1788 req.dir_type = cpu_to_le16(type);
1789 req.dir_ordinal = cpu_to_le16(ordinal);
1790 req.dir_ext = cpu_to_le16(ext);
1791 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1792 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1793 if (rc == 0) {
1794 if (index)
1795 *index = le16_to_cpu(output->dir_idx);
1796 if (item_length)
1797 *item_length = le32_to_cpu(output->dir_item_length);
1798 if (data_length)
1799 *data_length = le32_to_cpu(output->dir_data_length);
1800 }
1801 return rc;
1802 }
1803
/* Extract field number @desired_field from the LAST record of a package
 * log.  The log is a series of newline-terminated records, each a run
 * of tab-separated fields.  The buffer is tokenized IN PLACE: tabs and
 * newlines are overwritten with NULs, so the returned pointer is a
 * NUL-terminated string inside @data.  Returns NULL if the log is empty
 * or the final record has no such field.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		/* Each outer iteration scans one record; retval is reset so
		 * only the last record's match survives the loop.
		 */
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			/* Not a tab: end of record (or of the buffer) */
			if (*p != '\t')
				break;
			*p = 0;	/* terminate the field just scanned */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;	/* terminate the record at the newline */
	}
	return retval;
}
1836
1837 static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
1838 {
1839 u16 index = 0;
1840 u32 datalen;
1841
1842 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1843 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1844 &index, NULL, &datalen) != 0)
1845 return NULL;
1846
1847 memset(buf, 0, buflen);
1848 if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
1849 return NULL;
1850
1851 return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
1852 datalen);
1853 }
1854
1855 static int bnxt_get_eeprom(struct net_device *dev,
1856 struct ethtool_eeprom *eeprom,
1857 u8 *data)
1858 {
1859 u32 index;
1860 u32 offset;
1861
1862 if (eeprom->offset == 0) /* special offset value to get directory */
1863 return bnxt_get_nvram_directory(dev, eeprom->len, data);
1864
1865 index = eeprom->offset >> 24;
1866 offset = eeprom->offset & 0xffffff;
1867
1868 if (index == 0) {
1869 netdev_err(dev, "unsupported index value: %d\n", index);
1870 return -EINVAL;
1871 }
1872
1873 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
1874 }
1875
1876 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
1877 {
1878 struct bnxt *bp = netdev_priv(dev);
1879 struct hwrm_nvm_erase_dir_entry_input req = {0};
1880
1881 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
1882 req.dir_idx = cpu_to_le16(index);
1883 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1884 }
1885
/* ethtool set_eeprom handler for the NVM.  The request fields encode a
 * small protocol: magic[31:16] is the NVM item type; type 0xffff
 * selects a directory operation where magic[15:8] is the op (0x0e =
 * erase) and magic[7:0] a 1-based directory index.  For item writes,
 * magic[15:0] is the extension, offset[31:16] the ordinal and
 * offset[15:0] the attributes.  Executable item types may not be
 * written this way.  PF only.  Returns 0 or negative errno.
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			/* Sanity handshake: offset must be ~magic for erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}
1926
/* ethtool set_eee handler.  Validates and caches the requested EEE
 * configuration in bp->eee and applies it via firmware if the interface
 * is running.  EEE requires speed autoneg; the advertised EEE modes
 * must be a subset of the autoneg advertised speeds; the LPI timer is
 * range-checked against the device limits when they are known.
 * Single-function PF with EEE capability only.  Returns 0 or negative
 * errno.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	/* Disabling EEE skips all validation of the other parameters */
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			/* No device limit known: keep the current timer */
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* Default to everything both autoneg'ed and EEE-capable */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}
1978
1979 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1980 {
1981 struct bnxt *bp = netdev_priv(dev);
1982
1983 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1984 return -EOPNOTSUPP;
1985
1986 *edata = bp->eee;
1987 if (!bp->eee.eee_enabled) {
1988 /* Preserve tx_lpi_timer so that the last value will be used
1989 * by default when it is re-enabled.
1990 */
1991 edata->advertised = 0;
1992 edata->tx_lpi_enabled = 0;
1993 }
1994
1995 if (!bp->eee.eee_active)
1996 edata->lp_advertised = 0;
1997
1998 return 0;
1999 }
2000
/* Read @data_length bytes from an SFP module EEPROM page over the PHY
 * I2C bus, starting at @start_addr, in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE.  The PAGE_OFFSET enable bit is set only
 * for non-zero offsets.  The cmd lock is taken per chunk so each
 * response is copied out before the buffer can be reused.  Returns 0
 * or negative errno; stops at the first failed chunk.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
2033
/* ethtool get_module_info handler.  Reads the SFF-8472 compliance byte
 * from the module's A0h EEPROM page over I2C to determine the module
 * flavor (SFP / QSFP / QSFP28) and reports the matching ethtool EEPROM
 * layout and length.  Returns 0, or -EOPNOTSUPP when no module is
 * usable, the firmware is too old, or the module id is unknown.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = I2C_DEV_ADDR_A0;
	req.page_number = 0;
	req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
	req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
	req.port_id = cpu_to_le16(bp->pf.port_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 module_id = le32_to_cpu(output->data[0]);

		/* Map the SFF module identifier to an ethtool EEPROM type */
		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2087
2088 static int bnxt_get_module_eeprom(struct net_device *dev,
2089 struct ethtool_eeprom *eeprom,
2090 u8 *data)
2091 {
2092 struct bnxt *bp = netdev_priv(dev);
2093 u16 start = eeprom->offset, length = eeprom->len;
2094 int rc = 0;
2095
2096 memset(data, 0, eeprom->len);
2097
2098 /* Read A0 portion of the EEPROM */
2099 if (start < ETH_MODULE_SFF_8436_LEN) {
2100 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
2101 length = ETH_MODULE_SFF_8436_LEN - start;
2102 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
2103 start, length, data);
2104 if (rc)
2105 return rc;
2106 start += length;
2107 data += length;
2108 length = eeprom->len - length;
2109 }
2110
2111 /* Read A2 portion of the EEPROM */
2112 if (length) {
2113 start -= ETH_MODULE_SFF_8436_LEN;
2114 bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
2115 length, data);
2116 }
2117 return rc;
2118 }
2119
2120 static int bnxt_nway_reset(struct net_device *dev)
2121 {
2122 int rc = 0;
2123
2124 struct bnxt *bp = netdev_priv(dev);
2125 struct bnxt_link_info *link_info = &bp->link_info;
2126
2127 if (!BNXT_SINGLE_PF(bp))
2128 return -EOPNOTSUPP;
2129
2130 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2131 return -EINVAL;
2132
2133 if (netif_running(dev))
2134 rc = bnxt_hwrm_set_link_setting(bp, true, false);
2135
2136 return rc;
2137 }
2138
2139 static int bnxt_set_phys_id(struct net_device *dev,
2140 enum ethtool_phys_id_state state)
2141 {
2142 struct hwrm_port_led_cfg_input req = {0};
2143 struct bnxt *bp = netdev_priv(dev);
2144 struct bnxt_pf_info *pf = &bp->pf;
2145 struct bnxt_led_cfg *led_cfg;
2146 u8 led_state;
2147 __le16 duration;
2148 int i, rc;
2149
2150 if (!bp->num_leds || BNXT_VF(bp))
2151 return -EOPNOTSUPP;
2152
2153 if (state == ETHTOOL_ID_ACTIVE) {
2154 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
2155 duration = cpu_to_le16(500);
2156 } else if (state == ETHTOOL_ID_INACTIVE) {
2157 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
2158 duration = cpu_to_le16(0);
2159 } else {
2160 return -EINVAL;
2161 }
2162 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
2163 req.port_id = cpu_to_le16(pf->port_id);
2164 req.num_leds = bp->num_leds;
2165 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2166 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2167 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2168 led_cfg->led_id = bp->leds[i].led_id;
2169 led_cfg->led_state = led_state;
2170 led_cfg->led_blink_on = duration;
2171 led_cfg->led_blink_off = duration;
2172 led_cfg->led_group_id = bp->leds[i].led_group_id;
2173 }
2174 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2175 if (rc)
2176 rc = -EIO;
2177 return rc;
2178 }
2179
2180 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
2181 {
2182 struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
2183 struct hwrm_selftest_exec_input req = {0};
2184 int rc;
2185
2186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
2187 mutex_lock(&bp->hwrm_cmd_lock);
2188 resp->test_success = 0;
2189 req.flags = test_mask;
2190 rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
2191 *test_results = resp->test_success;
2192 mutex_unlock(&bp->hwrm_cmd_lock);
2193 return rc;
2194 }
2195
2196 #define BNXT_DRV_TESTS 0
2197
2198 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2199 u64 *buf)
2200 {
2201 struct bnxt *bp = netdev_priv(dev);
2202 bool offline = false;
2203 u8 test_results = 0;
2204 u8 test_mask = 0;
2205 int rc, i;
2206
2207 if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
2208 return;
2209 memset(buf, 0, sizeof(u64) * bp->num_tests);
2210 if (!netif_running(dev)) {
2211 etest->flags |= ETH_TEST_FL_FAILED;
2212 return;
2213 }
2214
2215 if (etest->flags & ETH_TEST_FL_OFFLINE) {
2216 if (bp->pf.active_vfs) {
2217 etest->flags |= ETH_TEST_FL_FAILED;
2218 netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
2219 return;
2220 }
2221 offline = true;
2222 }
2223
2224 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2225 u8 bit_val = 1 << i;
2226
2227 if (!(bp->test_info->offline_mask & bit_val))
2228 test_mask |= bit_val;
2229 else if (offline)
2230 test_mask |= bit_val;
2231 }
2232 if (!offline) {
2233 bnxt_run_fw_tests(bp, test_mask, &test_results);
2234 } else {
2235 rc = bnxt_close_nic(bp, false, false);
2236 if (rc)
2237 return;
2238 bnxt_run_fw_tests(bp, test_mask, &test_results);
2239 bnxt_open_nic(bp, false, true);
2240 }
2241 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2242 u8 bit_val = 1 << i;
2243
2244 if ((test_mask & bit_val) && !(test_results & bit_val)) {
2245 buf[i] = 1;
2246 etest->flags |= ETH_TEST_FL_FAILED;
2247 }
2248 }
2249 }
2250
2251 void bnxt_ethtool_init(struct bnxt *bp)
2252 {
2253 struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
2254 struct hwrm_selftest_qlist_input req = {0};
2255 struct bnxt_test_info *test_info;
2256 int i, rc;
2257
2258 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
2259 return;
2260
2261 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
2262 mutex_lock(&bp->hwrm_cmd_lock);
2263 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2264 if (rc)
2265 goto ethtool_init_exit;
2266
2267 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
2268 if (!test_info)
2269 goto ethtool_init_exit;
2270
2271 bp->test_info = test_info;
2272 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
2273 if (bp->num_tests > BNXT_MAX_TEST)
2274 bp->num_tests = BNXT_MAX_TEST;
2275
2276 test_info->offline_mask = resp->offline_tests;
2277 test_info->timeout = le16_to_cpu(resp->test_timeout);
2278 if (!test_info->timeout)
2279 test_info->timeout = HWRM_CMD_TIMEOUT;
2280 for (i = 0; i < bp->num_tests; i++) {
2281 char *str = test_info->string[i];
2282 char *fw_str = resp->test0_name + i * 32;
2283
2284 strlcpy(str, fw_str, ETH_GSTRING_LEN);
2285 strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
2286 if (test_info->offline_mask & (1 << i))
2287 strncat(str, " (offline)",
2288 ETH_GSTRING_LEN - strlen(str));
2289 else
2290 strncat(str, " (online)",
2291 ETH_GSTRING_LEN - strlen(str));
2292 }
2293
2294 ethtool_init_exit:
2295 mutex_unlock(&bp->hwrm_cmd_lock);
2296 }
2297
2298 void bnxt_ethtool_free(struct bnxt *bp)
2299 {
2300 kfree(bp->test_info);
2301 bp->test_info = NULL;
2302 }
2303
2304 const struct ethtool_ops bnxt_ethtool_ops = {
2305 .get_link_ksettings = bnxt_get_link_ksettings,
2306 .set_link_ksettings = bnxt_set_link_ksettings,
2307 .get_pauseparam = bnxt_get_pauseparam,
2308 .set_pauseparam = bnxt_set_pauseparam,
2309 .get_drvinfo = bnxt_get_drvinfo,
2310 .get_wol = bnxt_get_wol,
2311 .set_wol = bnxt_set_wol,
2312 .get_coalesce = bnxt_get_coalesce,
2313 .set_coalesce = bnxt_set_coalesce,
2314 .get_msglevel = bnxt_get_msglevel,
2315 .set_msglevel = bnxt_set_msglevel,
2316 .get_sset_count = bnxt_get_sset_count,
2317 .get_strings = bnxt_get_strings,
2318 .get_ethtool_stats = bnxt_get_ethtool_stats,
2319 .set_ringparam = bnxt_set_ringparam,
2320 .get_ringparam = bnxt_get_ringparam,
2321 .get_channels = bnxt_get_channels,
2322 .set_channels = bnxt_set_channels,
2323 .get_rxnfc = bnxt_get_rxnfc,
2324 .set_rxnfc = bnxt_set_rxnfc,
2325 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
2326 .get_rxfh_key_size = bnxt_get_rxfh_key_size,
2327 .get_rxfh = bnxt_get_rxfh,
2328 .flash_device = bnxt_flash_device,
2329 .get_eeprom_len = bnxt_get_eeprom_len,
2330 .get_eeprom = bnxt_get_eeprom,
2331 .set_eeprom = bnxt_set_eeprom,
2332 .get_link = bnxt_get_link,
2333 .get_eee = bnxt_get_eee,
2334 .set_eee = bnxt_set_eee,
2335 .get_module_info = bnxt_get_module_info,
2336 .get_module_eeprom = bnxt_get_module_eeprom,
2337 .nway_reset = bnxt_nway_reset,
2338 .set_phys_id = bnxt_set_phys_id,
2339 .self_test = bnxt_self_test,
2340 };