drivers/net/ethernet/broadcom/bnxt/bnxt.c (mirror_ubuntu-eoan-kernel.git)
1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2018 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54 #include <linux/hwmon.h>
55 #include <linux/hwmon-sysfs.h>
56
57 #include "bnxt_hsi.h"
58 #include "bnxt.h"
59 #include "bnxt_ulp.h"
60 #include "bnxt_sriov.h"
61 #include "bnxt_ethtool.h"
62 #include "bnxt_dcb.h"
63 #include "bnxt_xdp.h"
64 #include "bnxt_vfr.h"
65 #include "bnxt_tc.h"
66 #include "bnxt_devlink.h"
67 #include "bnxt_debugfs.h"
68
69 #define BNXT_TX_TIMEOUT (5 * HZ)
70
71 static const char version[] =
72 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
73
74 MODULE_LICENSE("GPL");
75 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77
78 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
80 #define BNXT_RX_COPY_THRESH 256
81
82 #define BNXT_TX_PUSH_THRESH 164
83
84 enum board_idx {
85 BCM57301,
86 BCM57302,
87 BCM57304,
88 BCM57417_NPAR,
89 BCM58700,
90 BCM57311,
91 BCM57312,
92 BCM57402,
93 BCM57404,
94 BCM57406,
95 BCM57402_NPAR,
96 BCM57407,
97 BCM57412,
98 BCM57414,
99 BCM57416,
100 BCM57417,
101 BCM57412_NPAR,
102 BCM57314,
103 BCM57417_SFP,
104 BCM57416_SFP,
105 BCM57404_NPAR,
106 BCM57406_NPAR,
107 BCM57407_SFP,
108 BCM57407_NPAR,
109 BCM57414_NPAR,
110 BCM57416_NPAR,
111 BCM57452,
112 BCM57454,
113 BCM5745x_NPAR,
114 BCM57508,
115 BCM58802,
116 BCM58804,
117 BCM58808,
118 NETXTREME_E_VF,
119 NETXTREME_C_VF,
120 NETXTREME_S_VF,
121 NETXTREME_E_P5_VF,
122 };
123
124 /* indexed by enum above */
125 static const struct {
126 char *name;
127 } board_info[] = {
128 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
129 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
130 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
132 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
133 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
134 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
135 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
136 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
137 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
138 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
139 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
140 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
141 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
142 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
143 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
144 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
145 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
146 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
147 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
148 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
149 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
150 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
151 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
152 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
153 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
154 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
155 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
156 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
157 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
158 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
159 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
160 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
161 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
162 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
163 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
164 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
165 };
166
167 static const struct pci_device_id bnxt_pci_tbl[] = {
168 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
171 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
173 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
174 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
175 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
177 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
178 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
179 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
180 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
181 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
182 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
184 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
185 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
186 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
187 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
188 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
190 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
191 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
192 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
195 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
198 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
199 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
200 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
201 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
202 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
203 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
204 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
205 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
206 #ifdef CONFIG_BNXT_SRIOV
207 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
216 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 { 0 }
219 };
220
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222
223 static const u16 bnxt_vf_req_snif[] = {
224 HWRM_FUNC_CFG,
225 HWRM_FUNC_VF_CFG,
226 HWRM_PORT_PHY_QCFG,
227 HWRM_CFA_L2_FILTER_ALLOC,
228 };
229
230 static const u16 bnxt_async_events_arr[] = {
231 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
233 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
234 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
235 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
236 };
237
238 static struct workqueue_struct *bnxt_pf_wq;
239
240 static bool bnxt_vf_pciid(enum board_idx idx)
241 {
242 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
243 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
244 }
245
246 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
247 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
248 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
249
250 #define BNXT_CP_DB_IRQ_DIS(db) \
251 writel(DB_CP_IRQ_DIS_FLAGS, db)
252
253 #define BNXT_DB_CQ(db, idx) \
254 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
255
256 #define BNXT_DB_NQ_P5(db, idx) \
257 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
258
259 #define BNXT_DB_CQ_ARM(db, idx) \
260 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
261
262 #define BNXT_DB_NQ_ARM_P5(db, idx) \
263 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
264
265 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
266 {
267 if (bp->flags & BNXT_FLAG_CHIP_P5)
268 BNXT_DB_NQ_P5(db, idx);
269 else
270 BNXT_DB_CQ(db, idx);
271 }
272
273 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
274 {
275 if (bp->flags & BNXT_FLAG_CHIP_P5)
276 BNXT_DB_NQ_ARM_P5(db, idx);
277 else
278 BNXT_DB_CQ_ARM(db, idx);
279 }
280
281 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
282 {
283 if (bp->flags & BNXT_FLAG_CHIP_P5)
284 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
285 db->doorbell);
286 else
287 BNXT_DB_CQ(db, idx);
288 }
289
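/* TX length hint lookup table, indexed by the packet length in 512-byte
 * units (bnxt_start_xmit() does flags |= bnxt_lhint_arr[length >> 9]).
 */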
290 const u16 bnxt_lhint_arr[] = {
291 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
292 TX_BD_FLAGS_LHINT_512_TO_1023,
293 TX_BD_FLAGS_LHINT_1024_TO_2047,
294 TX_BD_FLAGS_LHINT_1024_TO_2047,
295 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
296 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
297 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
298 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
299 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
300 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310 };
311
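/* Return the CFA action (switch port ID) attached to the skb via its
 * HW port mux metadata dst; this is non-zero only for packets sent on
 * behalf of a VF representor.  Returns 0 for regular traffic.
 */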
312 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
313 {
314 struct metadata_dst *md_dst = skb_metadata_dst(skb);
315
316 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
317 return 0;
318
319 return md_dst->u.port_info.port_id;
320 }
321
322 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
323 {
324 struct bnxt *bp = netdev_priv(dev);
325 struct tx_bd *txbd;
326 struct tx_bd_ext *txbd1;
327 struct netdev_queue *txq;
328 int i;
329 dma_addr_t mapping;
330 unsigned int length, pad = 0;
331 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
332 u16 prod, last_frag;
333 struct pci_dev *pdev = bp->pdev;
334 struct bnxt_tx_ring_info *txr;
335 struct bnxt_sw_tx_bd *tx_buf;
336
337 i = skb_get_queue_mapping(skb);
338 if (unlikely(i >= bp->tx_nr_rings)) {
339 dev_kfree_skb_any(skb);
340 return NETDEV_TX_OK;
341 }
342
343 txq = netdev_get_tx_queue(dev, i);
344 txr = &bp->tx_ring[bp->tx_ring_map[i]];
345 prod = txr->tx_prod;
346
347 free_size = bnxt_tx_avail(bp, txr);
348 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
349 netif_tx_stop_queue(txq);
350 return NETDEV_TX_BUSY;
351 }
352
353 length = skb->len;
354 len = skb_headlen(skb);
355 last_frag = skb_shinfo(skb)->nr_frags;
356
357 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
358
359 txbd->tx_bd_opaque = prod;
360
361 tx_buf = &txr->tx_buf_ring[prod];
362 tx_buf->skb = skb;
363 tx_buf->nr_frags = last_frag;
364
365 vlan_tag_flags = 0;
366 cfa_action = bnxt_xmit_get_cfa_action(skb);
367 if (skb_vlan_tag_present(skb)) {
368 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
369 skb_vlan_tag_get(skb);
370 /* Currently supports 802.1Q and 802.1ad VLAN offloads.
371 * QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
372 */
373 if (skb->vlan_proto == htons(ETH_P_8021Q))
374 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
375 }
376
377 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
378 struct tx_push_buffer *tx_push_buf = txr->tx_push;
379 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
380 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
381 void __iomem *db = txr->tx_db.doorbell;
382 void *pdata = tx_push_buf->data;
383 u64 *end;
384 int j, push_len;
385
386 /* Set COAL_NOW to be ready quickly for the next push */
387 tx_push->tx_bd_len_flags_type =
388 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
389 TX_BD_TYPE_LONG_TX_BD |
390 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
391 TX_BD_FLAGS_COAL_NOW |
392 TX_BD_FLAGS_PACKET_END |
393 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
394
395 if (skb->ip_summed == CHECKSUM_PARTIAL)
396 tx_push1->tx_bd_hsize_lflags =
397 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
398 else
399 tx_push1->tx_bd_hsize_lflags = 0;
400
401 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
402 tx_push1->tx_bd_cfa_action =
403 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
404
405 end = pdata + length;
406 end = PTR_ALIGN(end, 8) - 1;
407 *end = 0;
408
409 skb_copy_from_linear_data(skb, pdata, len);
410 pdata += len;
411 for (j = 0; j < last_frag; j++) {
412 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
413 void *fptr;
414
415 fptr = skb_frag_address_safe(frag);
416 if (!fptr)
417 goto normal_tx;
418
419 memcpy(pdata, fptr, skb_frag_size(frag));
420 pdata += skb_frag_size(frag);
421 }
422
423 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
424 txbd->tx_bd_haddr = txr->data_mapping;
425 prod = NEXT_TX(prod);
426 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
427 memcpy(txbd, tx_push1, sizeof(*txbd));
428 prod = NEXT_TX(prod);
429 tx_push->doorbell =
430 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
431 txr->tx_prod = prod;
432
433 tx_buf->is_push = 1;
434 netdev_tx_sent_queue(txq, skb->len);
435 wmb(); /* Sync is_push and byte queue before pushing data */
436
437 push_len = (length + sizeof(*tx_push) + 7) / 8;
438 if (push_len > 16) {
439 __iowrite64_copy(db, tx_push_buf, 16);
440 __iowrite32_copy(db + 4, tx_push_buf + 1,
441 (push_len - 16) << 1);
442 } else {
443 __iowrite64_copy(db, tx_push_buf, push_len);
444 }
445
446 goto tx_done;
447 }
448
449 normal_tx:
450 if (length < BNXT_MIN_PKT_SIZE) {
451 pad = BNXT_MIN_PKT_SIZE - length;
452 if (skb_pad(skb, pad)) {
453 /* SKB already freed. */
454 tx_buf->skb = NULL;
455 return NETDEV_TX_OK;
456 }
457 length = BNXT_MIN_PKT_SIZE;
458 }
459
460 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
461
462 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
463 dev_kfree_skb_any(skb);
464 tx_buf->skb = NULL;
465 return NETDEV_TX_OK;
466 }
467
468 dma_unmap_addr_set(tx_buf, mapping, mapping);
469 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
470 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
471
472 txbd->tx_bd_haddr = cpu_to_le64(mapping);
473
474 prod = NEXT_TX(prod);
475 txbd1 = (struct tx_bd_ext *)
476 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
477
478 txbd1->tx_bd_hsize_lflags = 0;
479 if (skb_is_gso(skb)) {
480 u32 hdr_len;
481
482 if (skb->encapsulation)
483 hdr_len = skb_inner_network_offset(skb) +
484 skb_inner_network_header_len(skb) +
485 inner_tcp_hdrlen(skb);
486 else
487 hdr_len = skb_transport_offset(skb) +
488 tcp_hdrlen(skb);
489
490 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
491 TX_BD_FLAGS_T_IPID |
492 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
493 length = skb_shinfo(skb)->gso_size;
494 txbd1->tx_bd_mss = cpu_to_le32(length);
495 length += hdr_len;
496 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
497 txbd1->tx_bd_hsize_lflags =
498 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
499 txbd1->tx_bd_mss = 0;
500 }
501
502 length >>= 9;
503 flags |= bnxt_lhint_arr[length];
504 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
505
506 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
507 txbd1->tx_bd_cfa_action =
508 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
509 for (i = 0; i < last_frag; i++) {
510 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
511
512 prod = NEXT_TX(prod);
513 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
514
515 len = skb_frag_size(frag);
516 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
517 DMA_TO_DEVICE);
518
519 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
520 goto tx_dma_error;
521
522 tx_buf = &txr->tx_buf_ring[prod];
523 dma_unmap_addr_set(tx_buf, mapping, mapping);
524
525 txbd->tx_bd_haddr = cpu_to_le64(mapping);
526
527 flags = len << TX_BD_LEN_SHIFT;
528 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
529 }
530
531 flags &= ~TX_BD_LEN;
532 txbd->tx_bd_len_flags_type =
533 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
534 TX_BD_FLAGS_PACKET_END);
535
536 netdev_tx_sent_queue(txq, skb->len);
537
538 /* Sync BD data before updating doorbell */
539 wmb();
540
541 prod = NEXT_TX(prod);
542 txr->tx_prod = prod;
543
544 if (!skb->xmit_more || netif_xmit_stopped(txq))
545 bnxt_db_write(bp, &txr->tx_db, prod);
546
547 tx_done:
548
549 mmiowb();
550
551 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
552 if (skb->xmit_more && !tx_buf->is_push)
553 bnxt_db_write(bp, &txr->tx_db, prod);
554
555 netif_tx_stop_queue(txq);
556
557 /* netif_tx_stop_queue() must be done before checking
558 * tx index in bnxt_tx_avail() below, because in
559 * bnxt_tx_int(), we update tx index before checking for
560 * netif_tx_queue_stopped().
561 */
562 smp_mb();
563 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
564 netif_tx_wake_queue(txq);
565 }
566 return NETDEV_TX_OK;
567
568 tx_dma_error:
569 last_frag = i;
570
571 /* start back at beginning and unmap skb */
572 prod = txr->tx_prod;
573 tx_buf = &txr->tx_buf_ring[prod];
574 tx_buf->skb = NULL;
575 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
576 skb_headlen(skb), PCI_DMA_TODEVICE);
577 prod = NEXT_TX(prod);
578
579 /* unmap remaining mapped pages */
580 for (i = 0; i < last_frag; i++) {
581 prod = NEXT_TX(prod);
582 tx_buf = &txr->tx_buf_ring[prod];
583 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
584 skb_frag_size(&skb_shinfo(skb)->frags[i]),
585 PCI_DMA_TODEVICE);
586 }
587
588 dev_kfree_skb_any(skb);
589 return NETDEV_TX_OK;
590 }
591
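/* Reclaim completed TX buffers: unmap the DMA buffers, free the skbs and
 * wake the TX queue if it was stopped and enough descriptors are now free.
 */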
592 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
593 {
594 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
595 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
596 u16 cons = txr->tx_cons;
597 struct pci_dev *pdev = bp->pdev;
598 int i;
599 unsigned int tx_bytes = 0;
600
601 for (i = 0; i < nr_pkts; i++) {
602 struct bnxt_sw_tx_bd *tx_buf;
603 struct sk_buff *skb;
604 int j, last;
605
606 tx_buf = &txr->tx_buf_ring[cons];
607 cons = NEXT_TX(cons);
608 skb = tx_buf->skb;
609 tx_buf->skb = NULL;
610
611 if (tx_buf->is_push) {
612 tx_buf->is_push = 0;
613 goto next_tx_int;
614 }
615
616 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
617 skb_headlen(skb), PCI_DMA_TODEVICE);
618 last = tx_buf->nr_frags;
619
620 for (j = 0; j < last; j++) {
621 cons = NEXT_TX(cons);
622 tx_buf = &txr->tx_buf_ring[cons];
623 dma_unmap_page(
624 &pdev->dev,
625 dma_unmap_addr(tx_buf, mapping),
626 skb_frag_size(&skb_shinfo(skb)->frags[j]),
627 PCI_DMA_TODEVICE);
628 }
629
630 next_tx_int:
631 cons = NEXT_TX(cons);
632
633 tx_bytes += skb->len;
634 dev_kfree_skb_any(skb);
635 }
636
637 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
638 txr->tx_cons = cons;
639
640 /* Need to make the tx_cons update visible to bnxt_start_xmit()
641 * before checking for netif_tx_queue_stopped(). Without the
642 * memory barrier, there is a small possibility that bnxt_start_xmit()
643 * will miss it and cause the queue to be stopped forever.
644 */
645 smp_mb();
646
647 if (unlikely(netif_tx_queue_stopped(txq)) &&
648 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
649 __netif_tx_lock(txq, smp_processor_id());
650 if (netif_tx_queue_stopped(txq) &&
651 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
652 txr->dev_state != BNXT_DEV_STATE_CLOSING)
653 netif_tx_wake_queue(txq);
654 __netif_tx_unlock(txq);
655 }
656 }
657
658 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
659 gfp_t gfp)
660 {
661 struct device *dev = &bp->pdev->dev;
662 struct page *page;
663
664 page = alloc_page(gfp);
665 if (!page)
666 return NULL;
667
668 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
669 DMA_ATTR_WEAK_ORDERING);
670 if (dma_mapping_error(dev, *mapping)) {
671 __free_page(page);
672 return NULL;
673 }
674 *mapping += bp->rx_dma_offset;
675 return page;
676 }
677
678 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
679 gfp_t gfp)
680 {
681 u8 *data;
682 struct pci_dev *pdev = bp->pdev;
683
684 data = kmalloc(bp->rx_buf_size, gfp);
685 if (!data)
686 return NULL;
687
688 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
689 bp->rx_buf_use_size, bp->rx_dir,
690 DMA_ATTR_WEAK_ORDERING);
691
692 if (dma_mapping_error(&pdev->dev, *mapping)) {
693 kfree(data);
694 data = NULL;
695 }
696 return data;
697 }
698
699 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
700 u16 prod, gfp_t gfp)
701 {
702 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
703 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
704 dma_addr_t mapping;
705
706 if (BNXT_RX_PAGE_MODE(bp)) {
707 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
708
709 if (!page)
710 return -ENOMEM;
711
712 rx_buf->data = page;
713 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
714 } else {
715 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
716
717 if (!data)
718 return -ENOMEM;
719
720 rx_buf->data = data;
721 rx_buf->data_ptr = data + bp->rx_offset;
722 }
723 rx_buf->mapping = mapping;
724
725 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
726 return 0;
727 }
728
729 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
730 {
731 u16 prod = rxr->rx_prod;
732 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
733 struct rx_bd *cons_bd, *prod_bd;
734
735 prod_rx_buf = &rxr->rx_buf_ring[prod];
736 cons_rx_buf = &rxr->rx_buf_ring[cons];
737
738 prod_rx_buf->data = data;
739 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
740
741 prod_rx_buf->mapping = cons_rx_buf->mapping;
742
743 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
744 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
745
746 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
747 }
748
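/* Find the next free index in the aggregation ring bitmap, wrapping
 * around to the beginning of the bitmap if necessary.
 */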
749 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
750 {
751 u16 next, max = rxr->rx_agg_bmap_size;
752
753 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
754 if (next >= max)
755 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
756 return next;
757 }
758
759 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
760 struct bnxt_rx_ring_info *rxr,
761 u16 prod, gfp_t gfp)
762 {
763 struct rx_bd *rxbd =
764 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
765 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
766 struct pci_dev *pdev = bp->pdev;
767 struct page *page;
768 dma_addr_t mapping;
769 u16 sw_prod = rxr->rx_sw_agg_prod;
770 unsigned int offset = 0;
771
772 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
773 page = rxr->rx_page;
774 if (!page) {
775 page = alloc_page(gfp);
776 if (!page)
777 return -ENOMEM;
778 rxr->rx_page = page;
779 rxr->rx_page_offset = 0;
780 }
781 offset = rxr->rx_page_offset;
782 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
783 if (rxr->rx_page_offset == PAGE_SIZE)
784 rxr->rx_page = NULL;
785 else
786 get_page(page);
787 } else {
788 page = alloc_page(gfp);
789 if (!page)
790 return -ENOMEM;
791 }
792
793 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
794 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
795 DMA_ATTR_WEAK_ORDERING);
796 if (dma_mapping_error(&pdev->dev, mapping)) {
797 __free_page(page);
798 return -EIO;
799 }
800
801 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
802 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
803
804 __set_bit(sw_prod, rxr->rx_agg_bmap);
805 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
806 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
807
808 rx_agg_buf->page = page;
809 rx_agg_buf->offset = offset;
810 rx_agg_buf->mapping = mapping;
811 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
812 rxbd->rx_bd_opaque = sw_prod;
813 return 0;
814 }
815
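/* Return the aggregation buffers of an aborted packet to the aggregation
 * ring so that they can be reused.
 */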
816 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
817 u32 agg_bufs)
818 {
819 struct bnxt_napi *bnapi = cpr->bnapi;
820 struct bnxt *bp = bnapi->bp;
821 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
822 u16 prod = rxr->rx_agg_prod;
823 u16 sw_prod = rxr->rx_sw_agg_prod;
824 u32 i;
825
826 for (i = 0; i < agg_bufs; i++) {
827 u16 cons;
828 struct rx_agg_cmp *agg;
829 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
830 struct rx_bd *prod_bd;
831 struct page *page;
832
833 agg = (struct rx_agg_cmp *)
834 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
835 cons = agg->rx_agg_cmp_opaque;
836 __clear_bit(cons, rxr->rx_agg_bmap);
837
838 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
839 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
840
841 __set_bit(sw_prod, rxr->rx_agg_bmap);
842 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
843 cons_rx_buf = &rxr->rx_agg_ring[cons];
844
845 /* It is possible for sw_prod to be equal to cons, so
846 * set cons_rx_buf->page to NULL first.
847 */
848 page = cons_rx_buf->page;
849 cons_rx_buf->page = NULL;
850 prod_rx_buf->page = page;
851 prod_rx_buf->offset = cons_rx_buf->offset;
852
853 prod_rx_buf->mapping = cons_rx_buf->mapping;
854
855 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
856
857 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
858 prod_bd->rx_bd_opaque = sw_prod;
859
860 prod = NEXT_RX_AGG(prod);
861 sw_prod = NEXT_RX_AGG(sw_prod);
862 cp_cons = NEXT_CMP(cp_cons);
863 }
864 rxr->rx_agg_prod = prod;
865 rxr->rx_sw_agg_prod = sw_prod;
866 }
867
868 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
869 struct bnxt_rx_ring_info *rxr,
870 u16 cons, void *data, u8 *data_ptr,
871 dma_addr_t dma_addr,
872 unsigned int offset_and_len)
873 {
874 unsigned int payload = offset_and_len >> 16;
875 unsigned int len = offset_and_len & 0xffff;
876 struct skb_frag_struct *frag;
877 struct page *page = data;
878 u16 prod = rxr->rx_prod;
879 struct sk_buff *skb;
880 int off, err;
881
882 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
883 if (unlikely(err)) {
884 bnxt_reuse_rx_data(rxr, cons, data);
885 return NULL;
886 }
887 dma_addr -= bp->rx_dma_offset;
888 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
889 DMA_ATTR_WEAK_ORDERING);
890
891 if (unlikely(!payload))
892 payload = eth_get_headlen(data_ptr, len);
893
894 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
895 if (!skb) {
896 __free_page(page);
897 return NULL;
898 }
899
900 off = (void *)data_ptr - page_address(page);
901 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
902 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
903 payload + NET_IP_ALIGN);
904
905 frag = &skb_shinfo(skb)->frags[0];
906 skb_frag_size_sub(frag, payload);
907 frag->page_offset += payload;
908 skb->data_len -= payload;
909 skb->tail += payload;
910
911 return skb;
912 }
913
914 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
915 struct bnxt_rx_ring_info *rxr, u16 cons,
916 void *data, u8 *data_ptr,
917 dma_addr_t dma_addr,
918 unsigned int offset_and_len)
919 {
920 u16 prod = rxr->rx_prod;
921 struct sk_buff *skb;
922 int err;
923
924 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
925 if (unlikely(err)) {
926 bnxt_reuse_rx_data(rxr, cons, data);
927 return NULL;
928 }
929
930 skb = build_skb(data, 0);
931 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
932 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
933 if (!skb) {
934 kfree(data);
935 return NULL;
936 }
937
938 skb_reserve(skb, bp->rx_offset);
939 skb_put(skb, offset_and_len & 0xffff);
940 return skb;
941 }
942
943 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
944 struct bnxt_cp_ring_info *cpr,
945 struct sk_buff *skb, u16 cp_cons,
946 u32 agg_bufs)
947 {
948 struct bnxt_napi *bnapi = cpr->bnapi;
949 struct pci_dev *pdev = bp->pdev;
950 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
951 u16 prod = rxr->rx_agg_prod;
952 u32 i;
953
954 for (i = 0; i < agg_bufs; i++) {
955 u16 cons, frag_len;
956 struct rx_agg_cmp *agg;
957 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
958 struct page *page;
959 dma_addr_t mapping;
960
961 agg = (struct rx_agg_cmp *)
962 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
963 cons = agg->rx_agg_cmp_opaque;
964 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
965 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
966
967 cons_rx_buf = &rxr->rx_agg_ring[cons];
968 skb_fill_page_desc(skb, i, cons_rx_buf->page,
969 cons_rx_buf->offset, frag_len);
970 __clear_bit(cons, rxr->rx_agg_bmap);
971
972 /* It is possible for bnxt_alloc_rx_page() to allocate
973 * a sw_prod index that equals the cons index, so we
974 * need to clear the cons entry now.
975 */
976 mapping = cons_rx_buf->mapping;
977 page = cons_rx_buf->page;
978 cons_rx_buf->page = NULL;
979
980 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
981 struct skb_shared_info *shinfo;
982 unsigned int nr_frags;
983
984 shinfo = skb_shinfo(skb);
985 nr_frags = --shinfo->nr_frags;
986 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
987
988 dev_kfree_skb(skb);
989
990 cons_rx_buf->page = page;
991
992 /* Update prod since possibly some pages have been
993 * allocated already.
994 */
995 rxr->rx_agg_prod = prod;
996 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
997 return NULL;
998 }
999
1000 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1001 PCI_DMA_FROMDEVICE,
1002 DMA_ATTR_WEAK_ORDERING);
1003
1004 skb->data_len += frag_len;
1005 skb->len += frag_len;
1006 skb->truesize += PAGE_SIZE;
1007
1008 prod = NEXT_RX_AGG(prod);
1009 cp_cons = NEXT_CMP(cp_cons);
1010 }
1011 rxr->rx_agg_prod = prod;
1012 return skb;
1013 }
1014
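/* Advance the raw consumer past the expected aggregation completions and
 * test the valid bit of the last one, i.e. check that all aggregation
 * entries for this packet have been placed in the completion ring.
 */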
1015 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1016 u8 agg_bufs, u32 *raw_cons)
1017 {
1018 u16 last;
1019 struct rx_agg_cmp *agg;
1020
1021 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1022 last = RING_CMP(*raw_cons);
1023 agg = (struct rx_agg_cmp *)
1024 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1025 return RX_AGG_CMP_VALID(agg, *raw_cons);
1026 }
1027
1028 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1029 unsigned int len,
1030 dma_addr_t mapping)
1031 {
1032 struct bnxt *bp = bnapi->bp;
1033 struct pci_dev *pdev = bp->pdev;
1034 struct sk_buff *skb;
1035
1036 skb = napi_alloc_skb(&bnapi->napi, len);
1037 if (!skb)
1038 return NULL;
1039
1040 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1041 bp->rx_dir);
1042
1043 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1044 len + NET_IP_ALIGN);
1045
1046 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1047 bp->rx_dir);
1048
1049 skb_put(skb, len);
1050 return skb;
1051 }
1052
1053 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1054 u32 *raw_cons, void *cmp)
1055 {
1056 struct rx_cmp *rxcmp = cmp;
1057 u32 tmp_raw_cons = *raw_cons;
1058 u8 cmp_type, agg_bufs = 0;
1059
1060 cmp_type = RX_CMP_TYPE(rxcmp);
1061
1062 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1063 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1064 RX_CMP_AGG_BUFS) >>
1065 RX_CMP_AGG_BUFS_SHIFT;
1066 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1067 struct rx_tpa_end_cmp *tpa_end = cmp;
1068
1069 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1070 RX_TPA_END_CMP_AGG_BUFS) >>
1071 RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1072 }
1073
1074 if (agg_bufs) {
1075 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1076 return -EBUSY;
1077 }
1078 *raw_cons = tmp_raw_cons;
1079 return 0;
1080 }
1081
1082 static void bnxt_queue_sp_work(struct bnxt *bp)
1083 {
1084 if (BNXT_PF(bp))
1085 queue_work(bnxt_pf_wq, &bp->sp_task);
1086 else
1087 schedule_work(&bp->sp_task);
1088 }
1089
1090 static void bnxt_cancel_sp_work(struct bnxt *bp)
1091 {
1092 if (BNXT_PF(bp))
1093 flush_workqueue(bnxt_pf_wq);
1094 else
1095 cancel_work_sync(&bp->sp_task);
1096 }
1097
1098 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1099 {
1100 if (!rxr->bnapi->in_reset) {
1101 rxr->bnapi->in_reset = true;
1102 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1103 bnxt_queue_sp_work(bp);
1104 }
1105 rxr->rx_next_cons = 0xffff;
1106 }
1107
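/* Handle a TPA_START completion: exchange the receive buffer at the
 * consumer index with the buffer cached in rx_tpa[agg_id] and save the
 * hash, GSO type and metadata needed when the TPA_END completion arrives.
 */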
1108 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1109 struct rx_tpa_start_cmp *tpa_start,
1110 struct rx_tpa_start_cmp_ext *tpa_start1)
1111 {
1112 u8 agg_id = TPA_START_AGG_ID(tpa_start);
1113 u16 cons, prod;
1114 struct bnxt_tpa_info *tpa_info;
1115 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1116 struct rx_bd *prod_bd;
1117 dma_addr_t mapping;
1118
1119 cons = tpa_start->rx_tpa_start_cmp_opaque;
1120 prod = rxr->rx_prod;
1121 cons_rx_buf = &rxr->rx_buf_ring[cons];
1122 prod_rx_buf = &rxr->rx_buf_ring[prod];
1123 tpa_info = &rxr->rx_tpa[agg_id];
1124
1125 if (unlikely(cons != rxr->rx_next_cons)) {
1126 bnxt_sched_reset(bp, rxr);
1127 return;
1128 }
1129 /* Store cfa_code in tpa_info to use in tpa_end
1130 * completion processing.
1131 */
1132 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1133 prod_rx_buf->data = tpa_info->data;
1134 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1135
1136 mapping = tpa_info->mapping;
1137 prod_rx_buf->mapping = mapping;
1138
1139 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1140
1141 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1142
1143 tpa_info->data = cons_rx_buf->data;
1144 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1145 cons_rx_buf->data = NULL;
1146 tpa_info->mapping = cons_rx_buf->mapping;
1147
1148 tpa_info->len =
1149 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1150 RX_TPA_START_CMP_LEN_SHIFT;
1151 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1152 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1153
1154 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1155 tpa_info->gso_type = SKB_GSO_TCPV4;
1156 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1157 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1158 tpa_info->gso_type = SKB_GSO_TCPV6;
1159 tpa_info->rss_hash =
1160 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1161 } else {
1162 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1163 tpa_info->gso_type = 0;
1164 if (netif_msg_rx_err(bp))
1165 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1166 }
1167 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1168 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1169 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1170
1171 rxr->rx_prod = NEXT_RX(prod);
1172 cons = NEXT_RX(cons);
1173 rxr->rx_next_cons = NEXT_RX(cons);
1174 cons_rx_buf = &rxr->rx_buf_ring[cons];
1175
1176 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1177 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1178 cons_rx_buf->data = NULL;
1179 }
1180
1181 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
1182 u32 agg_bufs)
1183 {
1184 if (agg_bufs)
1185 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1186 }
1187
1188 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1189 int payload_off, int tcp_ts,
1190 struct sk_buff *skb)
1191 {
1192 #ifdef CONFIG_INET
1193 struct tcphdr *th;
1194 int len, nw_off;
1195 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1196 u32 hdr_info = tpa_info->hdr_info;
1197 bool loopback = false;
1198
1199 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1200 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1201 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1202
1203 /* If the packet is an internal loopback packet, the offsets will
1204 * have an extra 4 bytes.
1205 */
1206 if (inner_mac_off == 4) {
1207 loopback = true;
1208 } else if (inner_mac_off > 4) {
1209 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1210 ETH_HLEN - 2));
1211
1212 /* We only support inner IPv4/IPv6. If we don't see the
1213 * correct protocol ID, it must be a loopback packet where
1214 * the offsets are off by 4.
1215 */
1216 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1217 loopback = true;
1218 }
1219 if (loopback) {
1220 /* internal loopback packet, subtract all offsets by 4 */
1221 inner_ip_off -= 4;
1222 inner_mac_off -= 4;
1223 outer_ip_off -= 4;
1224 }
1225
1226 nw_off = inner_ip_off - ETH_HLEN;
1227 skb_set_network_header(skb, nw_off);
1228 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1229 struct ipv6hdr *iph = ipv6_hdr(skb);
1230
1231 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1232 len = skb->len - skb_transport_offset(skb);
1233 th = tcp_hdr(skb);
1234 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1235 } else {
1236 struct iphdr *iph = ip_hdr(skb);
1237
1238 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1239 len = skb->len - skb_transport_offset(skb);
1240 th = tcp_hdr(skb);
1241 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1242 }
1243
1244 if (inner_mac_off) { /* tunnel */
1245 struct udphdr *uh = NULL;
1246 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1247 ETH_HLEN - 2));
1248
1249 if (proto == htons(ETH_P_IP)) {
1250 struct iphdr *iph = (struct iphdr *)skb->data;
1251
1252 if (iph->protocol == IPPROTO_UDP)
1253 uh = (struct udphdr *)(iph + 1);
1254 } else {
1255 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1256
1257 if (iph->nexthdr == IPPROTO_UDP)
1258 uh = (struct udphdr *)(iph + 1);
1259 }
1260 if (uh) {
1261 if (uh->check)
1262 skb_shinfo(skb)->gso_type |=
1263 SKB_GSO_UDP_TUNNEL_CSUM;
1264 else
1265 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1266 }
1267 }
1268 #endif
1269 return skb;
1270 }
1271
1272 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1273 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1274
1275 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1276 int payload_off, int tcp_ts,
1277 struct sk_buff *skb)
1278 {
1279 #ifdef CONFIG_INET
1280 struct tcphdr *th;
1281 int len, nw_off, tcp_opt_len = 0;
1282
1283 if (tcp_ts)
1284 tcp_opt_len = 12;
1285
1286 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1287 struct iphdr *iph;
1288
1289 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1290 ETH_HLEN;
1291 skb_set_network_header(skb, nw_off);
1292 iph = ip_hdr(skb);
1293 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1294 len = skb->len - skb_transport_offset(skb);
1295 th = tcp_hdr(skb);
1296 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1297 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1298 struct ipv6hdr *iph;
1299
1300 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1301 ETH_HLEN;
1302 skb_set_network_header(skb, nw_off);
1303 iph = ipv6_hdr(skb);
1304 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1305 len = skb->len - skb_transport_offset(skb);
1306 th = tcp_hdr(skb);
1307 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1308 } else {
1309 dev_kfree_skb_any(skb);
1310 return NULL;
1311 }
1312
1313 if (nw_off) { /* tunnel */
1314 struct udphdr *uh = NULL;
1315
1316 if (skb->protocol == htons(ETH_P_IP)) {
1317 struct iphdr *iph = (struct iphdr *)skb->data;
1318
1319 if (iph->protocol == IPPROTO_UDP)
1320 uh = (struct udphdr *)(iph + 1);
1321 } else {
1322 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1323
1324 if (iph->nexthdr == IPPROTO_UDP)
1325 uh = (struct udphdr *)(iph + 1);
1326 }
1327 if (uh) {
1328 if (uh->check)
1329 skb_shinfo(skb)->gso_type |=
1330 SKB_GSO_UDP_TUNNEL_CSUM;
1331 else
1332 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1333 }
1334 }
1335 #endif
1336 return skb;
1337 }
1338
1339 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1340 struct bnxt_tpa_info *tpa_info,
1341 struct rx_tpa_end_cmp *tpa_end,
1342 struct rx_tpa_end_cmp_ext *tpa_end1,
1343 struct sk_buff *skb)
1344 {
1345 #ifdef CONFIG_INET
1346 int payload_off;
1347 u16 segs;
1348
1349 segs = TPA_END_TPA_SEGS(tpa_end);
1350 if (segs == 1)
1351 return skb;
1352
1353 NAPI_GRO_CB(skb)->count = segs;
1354 skb_shinfo(skb)->gso_size =
1355 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1356 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1357 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1358 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1359 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1360 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1361 if (likely(skb))
1362 tcp_gro_complete(skb);
1363 #endif
1364 return skb;
1365 }
1366
1367 /* Given the cfa_code of a received packet, determine which
1368 * netdev (vf-rep or PF) the packet is destined to.
1369 */
1370 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1371 {
1372 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1373
1374 /* if vf-rep dev is NULL, the packet must belong to the PF */
1375 return dev ? dev : bp->dev;
1376 }
1377
1378 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1379 struct bnxt_cp_ring_info *cpr,
1380 u32 *raw_cons,
1381 struct rx_tpa_end_cmp *tpa_end,
1382 struct rx_tpa_end_cmp_ext *tpa_end1,
1383 u8 *event)
1384 {
1385 struct bnxt_napi *bnapi = cpr->bnapi;
1386 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1387 u8 agg_id = TPA_END_AGG_ID(tpa_end);
1388 u8 *data_ptr, agg_bufs;
1389 u16 cp_cons = RING_CMP(*raw_cons);
1390 unsigned int len;
1391 struct bnxt_tpa_info *tpa_info;
1392 dma_addr_t mapping;
1393 struct sk_buff *skb;
1394 void *data;
1395
1396 if (unlikely(bnapi->in_reset)) {
1397 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1398
1399 if (rc < 0)
1400 return ERR_PTR(-EBUSY);
1401 return NULL;
1402 }
1403
1404 tpa_info = &rxr->rx_tpa[agg_id];
1405 data = tpa_info->data;
1406 data_ptr = tpa_info->data_ptr;
1407 prefetch(data_ptr);
1408 len = tpa_info->len;
1409 mapping = tpa_info->mapping;
1410
1411 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1412 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1413
1414 if (agg_bufs) {
1415 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1416 return ERR_PTR(-EBUSY);
1417
1418 *event |= BNXT_AGG_EVENT;
1419 cp_cons = NEXT_CMP(cp_cons);
1420 }
1421
1422 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1423 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1424 if (agg_bufs > MAX_SKB_FRAGS)
1425 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1426 agg_bufs, (int)MAX_SKB_FRAGS);
1427 return NULL;
1428 }
1429
1430 if (len <= bp->rx_copy_thresh) {
1431 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1432 if (!skb) {
1433 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1434 return NULL;
1435 }
1436 } else {
1437 u8 *new_data;
1438 dma_addr_t new_mapping;
1439
1440 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1441 if (!new_data) {
1442 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1443 return NULL;
1444 }
1445
1446 tpa_info->data = new_data;
1447 tpa_info->data_ptr = new_data + bp->rx_offset;
1448 tpa_info->mapping = new_mapping;
1449
1450 skb = build_skb(data, 0);
1451 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1452 bp->rx_buf_use_size, bp->rx_dir,
1453 DMA_ATTR_WEAK_ORDERING);
1454
1455 if (!skb) {
1456 kfree(data);
1457 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1458 return NULL;
1459 }
1460 skb_reserve(skb, bp->rx_offset);
1461 skb_put(skb, len);
1462 }
1463
1464 if (agg_bufs) {
1465 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1466 if (!skb) {
1467 /* Page reuse already handled by bnxt_rx_pages(). */
1468 return NULL;
1469 }
1470 }
1471
1472 skb->protocol =
1473 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1474
1475 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1476 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1477
1478 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1479 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1480 u16 vlan_proto = tpa_info->metadata >>
1481 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1482 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1483
1484 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1485 }
1486
1487 skb_checksum_none_assert(skb);
1488 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1489 skb->ip_summed = CHECKSUM_UNNECESSARY;
1490 skb->csum_level =
1491 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1492 }
1493
1494 if (TPA_END_GRO(tpa_end))
1495 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1496
1497 return skb;
1498 }
1499
1500 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1501 struct sk_buff *skb)
1502 {
1503 if (skb->dev != bp->dev) {
1504 /* this packet belongs to a vf-rep */
1505 bnxt_vf_rep_rx(bp, skb);
1506 return;
1507 }
1508 skb_record_rx_queue(skb, bnapi->index);
1509 napi_gro_receive(&bnapi->napi, skb);
1510 }
1511
1512 /* returns the following:
1513 * 1 - 1 packet successfully received
1514 * 0 - successful TPA_START, packet not completed yet
1515 * -EBUSY - completion ring does not have all the agg buffers yet
1516 * -ENOMEM - packet aborted due to out of memory
1517 * -EIO - packet aborted due to hw error indicated in BD
1518 */
1519 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1520 u32 *raw_cons, u8 *event)
1521 {
1522 struct bnxt_napi *bnapi = cpr->bnapi;
1523 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1524 struct net_device *dev = bp->dev;
1525 struct rx_cmp *rxcmp;
1526 struct rx_cmp_ext *rxcmp1;
1527 u32 tmp_raw_cons = *raw_cons;
1528 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1529 struct bnxt_sw_rx_bd *rx_buf;
1530 unsigned int len;
1531 u8 *data_ptr, agg_bufs, cmp_type;
1532 dma_addr_t dma_addr;
1533 struct sk_buff *skb;
1534 void *data;
1535 int rc = 0;
1536 u32 misc;
1537
1538 rxcmp = (struct rx_cmp *)
1539 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1540
1541 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1542 cp_cons = RING_CMP(tmp_raw_cons);
1543 rxcmp1 = (struct rx_cmp_ext *)
1544 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1545
1546 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1547 return -EBUSY;
1548
1549 cmp_type = RX_CMP_TYPE(rxcmp);
1550
1551 prod = rxr->rx_prod;
1552
1553 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1554 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1555 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1556
1557 *event |= BNXT_RX_EVENT;
1558 goto next_rx_no_prod_no_len;
1559
1560 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1561 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1562 (struct rx_tpa_end_cmp *)rxcmp,
1563 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1564
1565 if (IS_ERR(skb))
1566 return -EBUSY;
1567
1568 rc = -ENOMEM;
1569 if (likely(skb)) {
1570 bnxt_deliver_skb(bp, bnapi, skb);
1571 rc = 1;
1572 }
1573 *event |= BNXT_RX_EVENT;
1574 goto next_rx_no_prod_no_len;
1575 }
1576
1577 cons = rxcmp->rx_cmp_opaque;
1578 rx_buf = &rxr->rx_buf_ring[cons];
1579 data = rx_buf->data;
1580 data_ptr = rx_buf->data_ptr;
1581 if (unlikely(cons != rxr->rx_next_cons)) {
1582 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1583
1584 bnxt_sched_reset(bp, rxr);
1585 return rc1;
1586 }
1587 prefetch(data_ptr);
1588
1589 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1590 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1591
1592 if (agg_bufs) {
1593 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1594 return -EBUSY;
1595
1596 cp_cons = NEXT_CMP(cp_cons);
1597 *event |= BNXT_AGG_EVENT;
1598 }
1599 *event |= BNXT_RX_EVENT;
1600
1601 rx_buf->data = NULL;
1602 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1603 bnxt_reuse_rx_data(rxr, cons, data);
1604 if (agg_bufs)
1605 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1606
1607 rc = -EIO;
1608 goto next_rx;
1609 }
1610
1611 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1612 dma_addr = rx_buf->mapping;
1613
1614 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1615 rc = 1;
1616 goto next_rx;
1617 }
1618
1619 if (len <= bp->rx_copy_thresh) {
1620 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1621 bnxt_reuse_rx_data(rxr, cons, data);
1622 if (!skb) {
1623 rc = -ENOMEM;
1624 goto next_rx;
1625 }
1626 } else {
1627 u32 payload;
1628
1629 if (rx_buf->data_ptr == data_ptr)
1630 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1631 else
1632 payload = 0;
1633 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1634 payload | len);
1635 if (!skb) {
1636 rc = -ENOMEM;
1637 goto next_rx;
1638 }
1639 }
1640
1641 if (agg_bufs) {
1642 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1643 if (!skb) {
1644 rc = -ENOMEM;
1645 goto next_rx;
1646 }
1647 }
1648
1649 if (RX_CMP_HASH_VALID(rxcmp)) {
1650 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1651 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1652
1653 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1654 if (hash_type != 1 && hash_type != 3)
1655 type = PKT_HASH_TYPE_L3;
1656 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1657 }
1658
1659 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1660 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1661
1662 if ((rxcmp1->rx_cmp_flags2 &
1663 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1664 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1665 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1666 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1667 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1668
1669 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1670 }
1671
1672 skb_checksum_none_assert(skb);
1673 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1674 if (dev->features & NETIF_F_RXCSUM) {
1675 skb->ip_summed = CHECKSUM_UNNECESSARY;
1676 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1677 }
1678 } else {
1679 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1680 if (dev->features & NETIF_F_RXCSUM)
1681 bnapi->cp_ring.rx_l4_csum_errors++;
1682 }
1683 }
1684
1685 bnxt_deliver_skb(bp, bnapi, skb);
1686 rc = 1;
1687
1688 next_rx:
1689 rxr->rx_prod = NEXT_RX(prod);
1690 rxr->rx_next_cons = NEXT_RX(cons);
1691
1692 cpr->rx_packets += 1;
1693 cpr->rx_bytes += len;
1694
1695 next_rx_no_prod_no_len:
1696 *raw_cons = tmp_raw_cons;
1697
1698 return rc;
1699 }
1700
1701 /* In netpoll mode, if we are using a combined completion ring, we need to
1702 * discard the rx packets and recycle the buffers.
1703 */
1704 static int bnxt_force_rx_discard(struct bnxt *bp,
1705 struct bnxt_cp_ring_info *cpr,
1706 u32 *raw_cons, u8 *event)
1707 {
1708 u32 tmp_raw_cons = *raw_cons;
1709 struct rx_cmp_ext *rxcmp1;
1710 struct rx_cmp *rxcmp;
1711 u16 cp_cons;
1712 u8 cmp_type;
1713
1714 cp_cons = RING_CMP(tmp_raw_cons);
1715 rxcmp = (struct rx_cmp *)
1716 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1717
1718 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1719 cp_cons = RING_CMP(tmp_raw_cons);
1720 rxcmp1 = (struct rx_cmp_ext *)
1721 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1722
1723 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1724 return -EBUSY;
1725
1726 cmp_type = RX_CMP_TYPE(rxcmp);
1727 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1728 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1729 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1730 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1731 struct rx_tpa_end_cmp_ext *tpa_end1;
1732
1733 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1734 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1735 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1736 }
1737 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1738 }
1739
1740 #define BNXT_GET_EVENT_PORT(data) \
1741 ((data) & \
1742 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1743
1744 static int bnxt_async_event_process(struct bnxt *bp,
1745 struct hwrm_async_event_cmpl *cmpl)
1746 {
1747 u16 event_id = le16_to_cpu(cmpl->event_id);
1748
1749 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1750 switch (event_id) {
1751 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1752 u32 data1 = le32_to_cpu(cmpl->event_data1);
1753 struct bnxt_link_info *link_info = &bp->link_info;
1754
1755 if (BNXT_VF(bp))
1756 goto async_event_process_exit;
1757
1758 /* print unsupported speed warning in forced speed mode only */
1759 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1760 (data1 & 0x20000)) {
1761 u16 fw_speed = link_info->force_link_speed;
1762 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1763
1764 if (speed != SPEED_UNKNOWN)
1765 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1766 speed);
1767 }
1768 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1769 }
1770 /* fall through */
1771 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1772 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1773 break;
1774 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1775 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1776 break;
1777 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1778 u32 data1 = le32_to_cpu(cmpl->event_data1);
1779 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1780
1781 if (BNXT_VF(bp))
1782 break;
1783
1784 if (bp->pf.port_id != port_id)
1785 break;
1786
1787 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1788 break;
1789 }
1790 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1791 if (BNXT_PF(bp))
1792 goto async_event_process_exit;
1793 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1794 break;
1795 default:
1796 goto async_event_process_exit;
1797 }
1798 bnxt_queue_sp_work(bp);
1799 async_event_process_exit:
1800 bnxt_ulp_async_events(bp, cmpl);
1801 return 0;
1802 }
1803
1804 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1805 {
1806 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1807 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1808 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1809 (struct hwrm_fwd_req_cmpl *)txcmp;
1810
1811 switch (cmpl_type) {
1812 case CMPL_BASE_TYPE_HWRM_DONE:
1813 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1814 if (seq_id == bp->hwrm_intr_seq_id)
1815 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1816 else
1817 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1818 break;
1819
1820 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1821 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1822
1823 if ((vf_id < bp->pf.first_vf_id) ||
1824 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1825 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1826 vf_id);
1827 return -EINVAL;
1828 }
1829
1830 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1831 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1832 bnxt_queue_sp_work(bp);
1833 break;
1834
1835 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1836 bnxt_async_event_process(bp,
1837 (struct hwrm_async_event_cmpl *)txcmp);
1838 	/* fall through */
1839 default:
1840 break;
1841 }
1842
1843 return 0;
1844 }
1845
1846 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1847 {
1848 struct bnxt_napi *bnapi = dev_instance;
1849 struct bnxt *bp = bnapi->bp;
1850 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1851 u32 cons = RING_CMP(cpr->cp_raw_cons);
1852
1853 cpr->event_ctr++;
1854 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1855 napi_schedule(&bnapi->napi);
1856 return IRQ_HANDLED;
1857 }
1858
1859 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1860 {
1861 u32 raw_cons = cpr->cp_raw_cons;
1862 u16 cons = RING_CMP(raw_cons);
1863 struct tx_cmp *txcmp;
1864
1865 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1866
1867 return TX_CMP_VALID(txcmp, raw_cons);
1868 }
1869
1870 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1871 {
1872 struct bnxt_napi *bnapi = dev_instance;
1873 struct bnxt *bp = bnapi->bp;
1874 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1875 u32 cons = RING_CMP(cpr->cp_raw_cons);
1876 u32 int_status;
1877
1878 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1879
1880 if (!bnxt_has_work(bp, cpr)) {
1881 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1882 /* return if erroneous interrupt */
1883 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1884 return IRQ_NONE;
1885 }
1886
1887 /* disable ring IRQ */
1888 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
1889
1890 /* Return here if interrupt is shared and is disabled. */
1891 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1892 return IRQ_HANDLED;
1893
1894 napi_schedule(&bnapi->napi);
1895 return IRQ_HANDLED;
1896 }
1897
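/* Core completion ring poll loop shared by the NAPI handlers.  Walk the
 * ring until the budget is exhausted or an entry that is not yet valid
 * is reached: count TX completions, hand RX completions to bnxt_rx_pkt()
 * (or bnxt_force_rx_discard() when polling with a zero budget), and pass
 * HWRM completions to bnxt_hwrm_handler().  TX completion handling and
 * the RX/aggregation doorbells are deferred to __bnxt_poll_work_done().
 */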
1898 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1899 int budget)
1900 {
1901 struct bnxt_napi *bnapi = cpr->bnapi;
1902 u32 raw_cons = cpr->cp_raw_cons;
1903 u32 cons;
1904 int tx_pkts = 0;
1905 int rx_pkts = 0;
1906 u8 event = 0;
1907 struct tx_cmp *txcmp;
1908
1909 cpr->has_more_work = 0;
1910 while (1) {
1911 int rc;
1912
1913 cons = RING_CMP(raw_cons);
1914 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1915
1916 if (!TX_CMP_VALID(txcmp, raw_cons))
1917 break;
1918
1919 		/* The valid test of the entry must be done before reading
1920 		 * any further.
1921 		 */
1922 dma_rmb();
1923 cpr->had_work_done = 1;
1924 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1925 tx_pkts++;
1926 /* return full budget so NAPI will complete. */
1927 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1928 rx_pkts = budget;
1929 raw_cons = NEXT_RAW_CMP(raw_cons);
1930 if (budget)
1931 cpr->has_more_work = 1;
1932 break;
1933 }
1934 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1935 if (likely(budget))
1936 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
1937 else
1938 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
1939 &event);
1940 if (likely(rc >= 0))
1941 rx_pkts += rc;
1942 /* Increment rx_pkts when rc is -ENOMEM to count towards
1943 * the NAPI budget. Otherwise, we may potentially loop
1944 * here forever if we consistently cannot allocate
1945 * buffers.
1946 */
1947 else if (rc == -ENOMEM && budget)
1948 rx_pkts++;
1949 else if (rc == -EBUSY) /* partial completion */
1950 break;
1951 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1952 CMPL_BASE_TYPE_HWRM_DONE) ||
1953 (TX_CMP_TYPE(txcmp) ==
1954 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1955 (TX_CMP_TYPE(txcmp) ==
1956 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1957 bnxt_hwrm_handler(bp, txcmp);
1958 }
1959 raw_cons = NEXT_RAW_CMP(raw_cons);
1960
1961 if (rx_pkts && rx_pkts == budget) {
1962 cpr->has_more_work = 1;
1963 break;
1964 }
1965 }
1966
1967 if (event & BNXT_TX_EVENT) {
1968 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1969 u16 prod = txr->tx_prod;
1970
1971 /* Sync BD data before updating doorbell */
1972 wmb();
1973
1974 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
1975 }
1976
1977 cpr->cp_raw_cons = raw_cons;
1978 bnapi->tx_pkts += tx_pkts;
1979 bnapi->events |= event;
1980 return rx_pkts;
1981 }
1982
1983 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1984 {
1985 if (bnapi->tx_pkts) {
1986 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1987 bnapi->tx_pkts = 0;
1988 }
1989
1990 if (bnapi->events & BNXT_RX_EVENT) {
1991 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1992
1993 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1994 if (bnapi->events & BNXT_AGG_EVENT)
1995 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
1996 }
1997 bnapi->events = 0;
1998 }
1999
2000 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2001 int budget)
2002 {
2003 struct bnxt_napi *bnapi = cpr->bnapi;
2004 int rx_pkts;
2005
2006 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2007
2008 /* ACK completion ring before freeing tx ring and producing new
2009 * buffers in rx/agg rings to prevent overflowing the completion
2010 * ring.
2011 */
2012 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2013
2014 __bnxt_poll_work_done(bp, bnapi);
2015 return rx_pkts;
2016 }
2017
2018 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2019 {
2020 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2021 struct bnxt *bp = bnapi->bp;
2022 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2023 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2024 struct tx_cmp *txcmp;
2025 struct rx_cmp_ext *rxcmp1;
2026 u32 cp_cons, tmp_raw_cons;
2027 u32 raw_cons = cpr->cp_raw_cons;
2028 u32 rx_pkts = 0;
2029 u8 event = 0;
2030
2031 while (1) {
2032 int rc;
2033
2034 cp_cons = RING_CMP(raw_cons);
2035 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2036
2037 if (!TX_CMP_VALID(txcmp, raw_cons))
2038 break;
2039
2040 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2041 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2042 cp_cons = RING_CMP(tmp_raw_cons);
2043 rxcmp1 = (struct rx_cmp_ext *)
2044 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2045
2046 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2047 break;
2048
2049 /* force an error to recycle the buffer */
2050 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2051 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2052
2053 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2054 if (likely(rc == -EIO) && budget)
2055 rx_pkts++;
2056 else if (rc == -EBUSY) /* partial completion */
2057 break;
2058 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2059 CMPL_BASE_TYPE_HWRM_DONE)) {
2060 bnxt_hwrm_handler(bp, txcmp);
2061 } else {
2062 netdev_err(bp->dev,
2063 "Invalid completion received on special ring\n");
2064 }
2065 raw_cons = NEXT_RAW_CMP(raw_cons);
2066
2067 if (rx_pkts == budget)
2068 break;
2069 }
2070
2071 cpr->cp_raw_cons = raw_cons;
2072 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2073 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2074
2075 if (event & BNXT_AGG_EVENT)
2076 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2077
2078 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2079 napi_complete_done(napi, rx_pkts);
2080 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2081 }
2082 return rx_pkts;
2083 }
2084
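/* Standard NAPI poll handler.  Keep calling bnxt_poll_work() until the
 * budget is consumed or no more completions are pending, then complete
 * NAPI and re-arm the completion ring doorbell.  When dynamic interrupt
 * moderation (DIM) is enabled, a sample is fed to net_dim() on exit.
 */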
2085 static int bnxt_poll(struct napi_struct *napi, int budget)
2086 {
2087 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2088 struct bnxt *bp = bnapi->bp;
2089 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2090 int work_done = 0;
2091
2092 while (1) {
2093 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2094
2095 if (work_done >= budget) {
2096 if (!budget)
2097 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2098 break;
2099 }
2100
2101 if (!bnxt_has_work(bp, cpr)) {
2102 if (napi_complete_done(napi, work_done))
2103 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2104 break;
2105 }
2106 }
2107 if (bp->flags & BNXT_FLAG_DIM) {
2108 struct net_dim_sample dim_sample;
2109
2110 net_dim_sample(cpr->event_ctr,
2111 cpr->rx_packets,
2112 cpr->rx_bytes,
2113 &dim_sample);
2114 net_dim(&cpr->dim, dim_sample);
2115 }
2116 mmiowb();
2117 return work_done;
2118 }
2119
2120 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2121 {
2122 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2123 int i, work_done = 0;
2124
2125 for (i = 0; i < 2; i++) {
2126 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2127
2128 if (cpr2) {
2129 work_done += __bnxt_poll_work(bp, cpr2,
2130 budget - work_done);
2131 cpr->has_more_work |= cpr2->has_more_work;
2132 }
2133 }
2134 return work_done;
2135 }
2136
2137 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2138 u64 dbr_type, bool all)
2139 {
2140 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2141 int i;
2142
2143 for (i = 0; i < 2; i++) {
2144 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2145 struct bnxt_db_info *db;
2146
2147 if (cpr2 && (all || cpr2->had_work_done)) {
2148 db = &cpr2->cp_db;
2149 writeq(db->db_key64 | dbr_type |
2150 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2151 cpr2->had_work_done = 0;
2152 }
2153 }
2154 __bnxt_poll_work_done(bp, bnapi);
2155 }
2156
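/* NAPI poll handler for P5 chips.  Here the main per-NAPI ring is a
 * notification queue (NQ) whose entries point at the RX/TX completion
 * sub-rings in cpr->cp_ring_arr[].  Each referenced sub-ring is polled
 * with __bnxt_poll_work(), and the CQ and NQ doorbells are re-armed only
 * after all outstanding work has been drained.
 */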
2157 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2158 {
2159 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2160 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2161 u32 raw_cons = cpr->cp_raw_cons;
2162 struct bnxt *bp = bnapi->bp;
2163 struct nqe_cn *nqcmp;
2164 int work_done = 0;
2165 u32 cons;
2166
2167 if (cpr->has_more_work) {
2168 cpr->has_more_work = 0;
2169 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2170 if (cpr->has_more_work) {
2171 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2172 return work_done;
2173 }
2174 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2175 if (napi_complete_done(napi, work_done))
2176 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2177 return work_done;
2178 }
2179 while (1) {
2180 cons = RING_CMP(raw_cons);
2181 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2182
2183 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2184 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2185 false);
2186 cpr->cp_raw_cons = raw_cons;
2187 if (napi_complete_done(napi, work_done))
2188 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2189 cpr->cp_raw_cons);
2190 return work_done;
2191 }
2192
2193 		/* The valid test of the entry must be done before reading
2194 		 * any further.
2195 		 */
2196 dma_rmb();
2197
2198 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2199 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2200 struct bnxt_cp_ring_info *cpr2;
2201
2202 cpr2 = cpr->cp_ring_arr[idx];
2203 work_done += __bnxt_poll_work(bp, cpr2,
2204 budget - work_done);
2205 cpr->has_more_work = cpr2->has_more_work;
2206 } else {
2207 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2208 }
2209 raw_cons = NEXT_RAW_CMP(raw_cons);
2210 if (cpr->has_more_work)
2211 break;
2212 }
2213 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2214 cpr->cp_raw_cons = raw_cons;
2215 return work_done;
2216 }
2217
2218 static void bnxt_free_tx_skbs(struct bnxt *bp)
2219 {
2220 int i, max_idx;
2221 struct pci_dev *pdev = bp->pdev;
2222
2223 if (!bp->tx_ring)
2224 return;
2225
2226 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2227 for (i = 0; i < bp->tx_nr_rings; i++) {
2228 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2229 int j;
2230
2231 for (j = 0; j < max_idx;) {
2232 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2233 struct sk_buff *skb = tx_buf->skb;
2234 int k, last;
2235
2236 if (!skb) {
2237 j++;
2238 continue;
2239 }
2240
2241 tx_buf->skb = NULL;
2242
2243 if (tx_buf->is_push) {
2244 dev_kfree_skb(skb);
2245 j += 2;
2246 continue;
2247 }
2248
2249 dma_unmap_single(&pdev->dev,
2250 dma_unmap_addr(tx_buf, mapping),
2251 skb_headlen(skb),
2252 PCI_DMA_TODEVICE);
2253
2254 last = tx_buf->nr_frags;
2255 j += 2;
2256 for (k = 0; k < last; k++, j++) {
2257 int ring_idx = j & bp->tx_ring_mask;
2258 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2259
2260 tx_buf = &txr->tx_buf_ring[ring_idx];
2261 dma_unmap_page(
2262 &pdev->dev,
2263 dma_unmap_addr(tx_buf, mapping),
2264 skb_frag_size(frag), PCI_DMA_TODEVICE);
2265 }
2266 dev_kfree_skb(skb);
2267 }
2268 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2269 }
2270 }
2271
2272 static void bnxt_free_rx_skbs(struct bnxt *bp)
2273 {
2274 int i, max_idx, max_agg_idx;
2275 struct pci_dev *pdev = bp->pdev;
2276
2277 if (!bp->rx_ring)
2278 return;
2279
2280 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2281 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2282 for (i = 0; i < bp->rx_nr_rings; i++) {
2283 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2284 int j;
2285
2286 if (rxr->rx_tpa) {
2287 for (j = 0; j < MAX_TPA; j++) {
2288 struct bnxt_tpa_info *tpa_info =
2289 &rxr->rx_tpa[j];
2290 u8 *data = tpa_info->data;
2291
2292 if (!data)
2293 continue;
2294
2295 dma_unmap_single_attrs(&pdev->dev,
2296 tpa_info->mapping,
2297 bp->rx_buf_use_size,
2298 bp->rx_dir,
2299 DMA_ATTR_WEAK_ORDERING);
2300
2301 tpa_info->data = NULL;
2302
2303 kfree(data);
2304 }
2305 }
2306
2307 for (j = 0; j < max_idx; j++) {
2308 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2309 dma_addr_t mapping = rx_buf->mapping;
2310 void *data = rx_buf->data;
2311
2312 if (!data)
2313 continue;
2314
2315 rx_buf->data = NULL;
2316
2317 if (BNXT_RX_PAGE_MODE(bp)) {
2318 mapping -= bp->rx_dma_offset;
2319 dma_unmap_page_attrs(&pdev->dev, mapping,
2320 PAGE_SIZE, bp->rx_dir,
2321 DMA_ATTR_WEAK_ORDERING);
2322 __free_page(data);
2323 } else {
2324 dma_unmap_single_attrs(&pdev->dev, mapping,
2325 bp->rx_buf_use_size,
2326 bp->rx_dir,
2327 DMA_ATTR_WEAK_ORDERING);
2328 kfree(data);
2329 }
2330 }
2331
2332 for (j = 0; j < max_agg_idx; j++) {
2333 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2334 &rxr->rx_agg_ring[j];
2335 struct page *page = rx_agg_buf->page;
2336
2337 if (!page)
2338 continue;
2339
2340 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2341 BNXT_RX_PAGE_SIZE,
2342 PCI_DMA_FROMDEVICE,
2343 DMA_ATTR_WEAK_ORDERING);
2344
2345 rx_agg_buf->page = NULL;
2346 __clear_bit(j, rxr->rx_agg_bmap);
2347
2348 __free_page(page);
2349 }
2350 if (rxr->rx_page) {
2351 __free_page(rxr->rx_page);
2352 rxr->rx_page = NULL;
2353 }
2354 }
2355 }
2356
2357 static void bnxt_free_skbs(struct bnxt *bp)
2358 {
2359 bnxt_free_tx_skbs(bp);
2360 bnxt_free_rx_skbs(bp);
2361 }
2362
2363 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2364 {
2365 struct pci_dev *pdev = bp->pdev;
2366 int i;
2367
2368 for (i = 0; i < rmem->nr_pages; i++) {
2369 if (!rmem->pg_arr[i])
2370 continue;
2371
2372 dma_free_coherent(&pdev->dev, rmem->page_size,
2373 rmem->pg_arr[i], rmem->dma_arr[i]);
2374
2375 rmem->pg_arr[i] = NULL;
2376 }
2377 if (rmem->pg_tbl) {
2378 size_t pg_tbl_size = rmem->nr_pages * 8;
2379
2380 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2381 pg_tbl_size = rmem->page_size;
2382 dma_free_coherent(&pdev->dev, pg_tbl_size,
2383 rmem->pg_tbl, rmem->pg_tbl_map);
2384 rmem->pg_tbl = NULL;
2385 }
2386 if (rmem->vmem_size && *rmem->vmem) {
2387 vfree(*rmem->vmem);
2388 *rmem->vmem = NULL;
2389 }
2390 }
2391
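/* Allocate the DMA-coherent pages described by @rmem.  When the ring
 * spans more than one page (or uses an extra level of indirection), a
 * page table is allocated as well and each entry is filled with the page
 * DMA address plus the PTU valid/next-to-last/last bits expected by the
 * hardware.  The optional vmem area holds the software ring state.
 */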
2392 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2393 {
2394 struct pci_dev *pdev = bp->pdev;
2395 u64 valid_bit = 0;
2396 int i;
2397
2398 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2399 valid_bit = PTU_PTE_VALID;
2400 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2401 size_t pg_tbl_size = rmem->nr_pages * 8;
2402
2403 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2404 pg_tbl_size = rmem->page_size;
2405 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2406 &rmem->pg_tbl_map,
2407 GFP_KERNEL);
2408 if (!rmem->pg_tbl)
2409 return -ENOMEM;
2410 }
2411
2412 for (i = 0; i < rmem->nr_pages; i++) {
2413 u64 extra_bits = valid_bit;
2414
2415 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2416 rmem->page_size,
2417 &rmem->dma_arr[i],
2418 GFP_KERNEL);
2419 if (!rmem->pg_arr[i])
2420 return -ENOMEM;
2421
2422 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2423 if (i == rmem->nr_pages - 2 &&
2424 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2425 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2426 else if (i == rmem->nr_pages - 1 &&
2427 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2428 extra_bits |= PTU_PTE_LAST;
2429 rmem->pg_tbl[i] =
2430 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2431 }
2432 }
2433
2434 if (rmem->vmem_size) {
2435 *rmem->vmem = vzalloc(rmem->vmem_size);
2436 if (!(*rmem->vmem))
2437 return -ENOMEM;
2438 }
2439 return 0;
2440 }
2441
2442 static void bnxt_free_rx_rings(struct bnxt *bp)
2443 {
2444 int i;
2445
2446 if (!bp->rx_ring)
2447 return;
2448
2449 for (i = 0; i < bp->rx_nr_rings; i++) {
2450 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2451 struct bnxt_ring_struct *ring;
2452
2453 if (rxr->xdp_prog)
2454 bpf_prog_put(rxr->xdp_prog);
2455
2456 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2457 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2458
2459 kfree(rxr->rx_tpa);
2460 rxr->rx_tpa = NULL;
2461
2462 kfree(rxr->rx_agg_bmap);
2463 rxr->rx_agg_bmap = NULL;
2464
2465 ring = &rxr->rx_ring_struct;
2466 bnxt_free_ring(bp, &ring->ring_mem);
2467
2468 ring = &rxr->rx_agg_ring_struct;
2469 bnxt_free_ring(bp, &ring->ring_mem);
2470 }
2471 }
2472
2473 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2474 {
2475 int i, rc, agg_rings = 0, tpa_rings = 0;
2476
2477 if (!bp->rx_ring)
2478 return -ENOMEM;
2479
2480 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2481 agg_rings = 1;
2482
2483 if (bp->flags & BNXT_FLAG_TPA)
2484 tpa_rings = 1;
2485
2486 for (i = 0; i < bp->rx_nr_rings; i++) {
2487 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2488 struct bnxt_ring_struct *ring;
2489
2490 ring = &rxr->rx_ring_struct;
2491
2492 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2493 if (rc < 0)
2494 return rc;
2495
2496 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2497 if (rc)
2498 return rc;
2499
2500 ring->grp_idx = i;
2501 if (agg_rings) {
2502 u16 mem_size;
2503
2504 ring = &rxr->rx_agg_ring_struct;
2505 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2506 if (rc)
2507 return rc;
2508
2509 ring->grp_idx = i;
2510 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2511 mem_size = rxr->rx_agg_bmap_size / 8;
2512 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2513 if (!rxr->rx_agg_bmap)
2514 return -ENOMEM;
2515
2516 if (tpa_rings) {
2517 rxr->rx_tpa = kcalloc(MAX_TPA,
2518 sizeof(struct bnxt_tpa_info),
2519 GFP_KERNEL);
2520 if (!rxr->rx_tpa)
2521 return -ENOMEM;
2522 }
2523 }
2524 }
2525 return 0;
2526 }
2527
2528 static void bnxt_free_tx_rings(struct bnxt *bp)
2529 {
2530 int i;
2531 struct pci_dev *pdev = bp->pdev;
2532
2533 if (!bp->tx_ring)
2534 return;
2535
2536 for (i = 0; i < bp->tx_nr_rings; i++) {
2537 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2538 struct bnxt_ring_struct *ring;
2539
2540 if (txr->tx_push) {
2541 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2542 txr->tx_push, txr->tx_push_mapping);
2543 txr->tx_push = NULL;
2544 }
2545
2546 ring = &txr->tx_ring_struct;
2547
2548 bnxt_free_ring(bp, &ring->ring_mem);
2549 }
2550 }
2551
2552 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2553 {
2554 int i, j, rc;
2555 struct pci_dev *pdev = bp->pdev;
2556
2557 bp->tx_push_size = 0;
2558 if (bp->tx_push_thresh) {
2559 int push_size;
2560
2561 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2562 bp->tx_push_thresh);
2563
2564 if (push_size > 256) {
2565 push_size = 0;
2566 bp->tx_push_thresh = 0;
2567 }
2568
2569 bp->tx_push_size = push_size;
2570 }
2571
2572 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2573 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2574 struct bnxt_ring_struct *ring;
2575 u8 qidx;
2576
2577 ring = &txr->tx_ring_struct;
2578
2579 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2580 if (rc)
2581 return rc;
2582
2583 ring->grp_idx = txr->bnapi->index;
2584 if (bp->tx_push_size) {
2585 dma_addr_t mapping;
2586
2587 			/* One pre-allocated DMA buffer to back up
2588 			 * the TX push operation
2589 			 */
2590 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2591 bp->tx_push_size,
2592 &txr->tx_push_mapping,
2593 GFP_KERNEL);
2594
2595 if (!txr->tx_push)
2596 return -ENOMEM;
2597
2598 mapping = txr->tx_push_mapping +
2599 sizeof(struct tx_push_bd);
2600 txr->data_mapping = cpu_to_le64(mapping);
2601
2602 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2603 }
2604 qidx = bp->tc_to_qidx[j];
2605 ring->queue_id = bp->q_info[qidx].queue_id;
2606 if (i < bp->tx_nr_rings_xdp)
2607 continue;
2608 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2609 j++;
2610 }
2611 return 0;
2612 }
2613
2614 static void bnxt_free_cp_rings(struct bnxt *bp)
2615 {
2616 int i;
2617
2618 if (!bp->bnapi)
2619 return;
2620
2621 for (i = 0; i < bp->cp_nr_rings; i++) {
2622 struct bnxt_napi *bnapi = bp->bnapi[i];
2623 struct bnxt_cp_ring_info *cpr;
2624 struct bnxt_ring_struct *ring;
2625 int j;
2626
2627 if (!bnapi)
2628 continue;
2629
2630 cpr = &bnapi->cp_ring;
2631 ring = &cpr->cp_ring_struct;
2632
2633 bnxt_free_ring(bp, &ring->ring_mem);
2634
2635 for (j = 0; j < 2; j++) {
2636 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2637
2638 if (cpr2) {
2639 ring = &cpr2->cp_ring_struct;
2640 bnxt_free_ring(bp, &ring->ring_mem);
2641 kfree(cpr2);
2642 cpr->cp_ring_arr[j] = NULL;
2643 }
2644 }
2645 }
2646 }
2647
2648 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2649 {
2650 struct bnxt_ring_mem_info *rmem;
2651 struct bnxt_ring_struct *ring;
2652 struct bnxt_cp_ring_info *cpr;
2653 int rc;
2654
2655 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2656 if (!cpr)
2657 return NULL;
2658
2659 ring = &cpr->cp_ring_struct;
2660 rmem = &ring->ring_mem;
2661 rmem->nr_pages = bp->cp_nr_pages;
2662 rmem->page_size = HW_CMPD_RING_SIZE;
2663 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2664 rmem->dma_arr = cpr->cp_desc_mapping;
2665 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2666 rc = bnxt_alloc_ring(bp, rmem);
2667 if (rc) {
2668 bnxt_free_ring(bp, rmem);
2669 kfree(cpr);
2670 cpr = NULL;
2671 }
2672 return cpr;
2673 }
2674
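/* Allocate one completion ring per NAPI instance and record its MSI-X
 * map index, skewed past any vectors reserved for the ULP driver.  On
 * P5 chips the per-NAPI ring only carries notifications, so separate RX
 * and TX completion sub-rings are allocated with bnxt_alloc_cp_sub_ring()
 * and linked into cpr->cp_ring_arr[].
 */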
2675 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2676 {
2677 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2678 int i, rc, ulp_base_vec, ulp_msix;
2679
2680 ulp_msix = bnxt_get_ulp_msix_num(bp);
2681 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2682 for (i = 0; i < bp->cp_nr_rings; i++) {
2683 struct bnxt_napi *bnapi = bp->bnapi[i];
2684 struct bnxt_cp_ring_info *cpr;
2685 struct bnxt_ring_struct *ring;
2686
2687 if (!bnapi)
2688 continue;
2689
2690 cpr = &bnapi->cp_ring;
2691 cpr->bnapi = bnapi;
2692 ring = &cpr->cp_ring_struct;
2693
2694 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2695 if (rc)
2696 return rc;
2697
2698 if (ulp_msix && i >= ulp_base_vec)
2699 ring->map_idx = i + ulp_msix;
2700 else
2701 ring->map_idx = i;
2702
2703 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2704 continue;
2705
2706 if (i < bp->rx_nr_rings) {
2707 struct bnxt_cp_ring_info *cpr2 =
2708 bnxt_alloc_cp_sub_ring(bp);
2709
2710 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2711 if (!cpr2)
2712 return -ENOMEM;
2713 cpr2->bnapi = bnapi;
2714 }
2715 if ((sh && i < bp->tx_nr_rings) ||
2716 (!sh && i >= bp->rx_nr_rings)) {
2717 struct bnxt_cp_ring_info *cpr2 =
2718 bnxt_alloc_cp_sub_ring(bp);
2719
2720 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2721 if (!cpr2)
2722 return -ENOMEM;
2723 cpr2->bnapi = bnapi;
2724 }
2725 }
2726 return 0;
2727 }
2728
2729 static void bnxt_init_ring_struct(struct bnxt *bp)
2730 {
2731 int i;
2732
2733 for (i = 0; i < bp->cp_nr_rings; i++) {
2734 struct bnxt_napi *bnapi = bp->bnapi[i];
2735 struct bnxt_ring_mem_info *rmem;
2736 struct bnxt_cp_ring_info *cpr;
2737 struct bnxt_rx_ring_info *rxr;
2738 struct bnxt_tx_ring_info *txr;
2739 struct bnxt_ring_struct *ring;
2740
2741 if (!bnapi)
2742 continue;
2743
2744 cpr = &bnapi->cp_ring;
2745 ring = &cpr->cp_ring_struct;
2746 rmem = &ring->ring_mem;
2747 rmem->nr_pages = bp->cp_nr_pages;
2748 rmem->page_size = HW_CMPD_RING_SIZE;
2749 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2750 rmem->dma_arr = cpr->cp_desc_mapping;
2751 rmem->vmem_size = 0;
2752
2753 rxr = bnapi->rx_ring;
2754 if (!rxr)
2755 goto skip_rx;
2756
2757 ring = &rxr->rx_ring_struct;
2758 rmem = &ring->ring_mem;
2759 rmem->nr_pages = bp->rx_nr_pages;
2760 rmem->page_size = HW_RXBD_RING_SIZE;
2761 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2762 rmem->dma_arr = rxr->rx_desc_mapping;
2763 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2764 rmem->vmem = (void **)&rxr->rx_buf_ring;
2765
2766 ring = &rxr->rx_agg_ring_struct;
2767 rmem = &ring->ring_mem;
2768 rmem->nr_pages = bp->rx_agg_nr_pages;
2769 rmem->page_size = HW_RXBD_RING_SIZE;
2770 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2771 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2772 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2773 rmem->vmem = (void **)&rxr->rx_agg_ring;
2774
2775 skip_rx:
2776 txr = bnapi->tx_ring;
2777 if (!txr)
2778 continue;
2779
2780 ring = &txr->tx_ring_struct;
2781 rmem = &ring->ring_mem;
2782 rmem->nr_pages = bp->tx_nr_pages;
2783 rmem->page_size = HW_RXBD_RING_SIZE;
2784 rmem->pg_arr = (void **)txr->tx_desc_ring;
2785 rmem->dma_arr = txr->tx_desc_mapping;
2786 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2787 rmem->vmem = (void **)&txr->tx_buf_ring;
2788 }
2789 }
2790
2791 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2792 {
2793 int i;
2794 u32 prod;
2795 struct rx_bd **rx_buf_ring;
2796
2797 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2798 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
2799 int j;
2800 struct rx_bd *rxbd;
2801
2802 rxbd = rx_buf_ring[i];
2803 if (!rxbd)
2804 continue;
2805
2806 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2807 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2808 rxbd->rx_bd_opaque = prod;
2809 }
2810 }
2811 }
2812
2813 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2814 {
2815 struct net_device *dev = bp->dev;
2816 struct bnxt_rx_ring_info *rxr;
2817 struct bnxt_ring_struct *ring;
2818 u32 prod, type;
2819 int i;
2820
2821 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2822 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2823
2824 if (NET_IP_ALIGN == 2)
2825 type |= RX_BD_FLAGS_SOP;
2826
2827 rxr = &bp->rx_ring[ring_nr];
2828 ring = &rxr->rx_ring_struct;
2829 bnxt_init_rxbd_pages(ring, type);
2830
2831 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2832 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2833 if (IS_ERR(rxr->xdp_prog)) {
2834 int rc = PTR_ERR(rxr->xdp_prog);
2835
2836 rxr->xdp_prog = NULL;
2837 return rc;
2838 }
2839 }
2840 prod = rxr->rx_prod;
2841 for (i = 0; i < bp->rx_ring_size; i++) {
2842 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2843 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2844 ring_nr, i, bp->rx_ring_size);
2845 break;
2846 }
2847 prod = NEXT_RX(prod);
2848 }
2849 rxr->rx_prod = prod;
2850 ring->fw_ring_id = INVALID_HW_RING_ID;
2851
2852 ring = &rxr->rx_agg_ring_struct;
2853 ring->fw_ring_id = INVALID_HW_RING_ID;
2854
2855 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2856 return 0;
2857
2858 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2859 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2860
2861 bnxt_init_rxbd_pages(ring, type);
2862
2863 prod = rxr->rx_agg_prod;
2864 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2865 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2866 			netdev_warn(dev, "init'ed rx agg ring %d with %d/%d pages only\n",
2867 				    ring_nr, i, bp->rx_agg_ring_size);
2868 break;
2869 }
2870 prod = NEXT_RX_AGG(prod);
2871 }
2872 rxr->rx_agg_prod = prod;
2873
2874 if (bp->flags & BNXT_FLAG_TPA) {
2875 if (rxr->rx_tpa) {
2876 u8 *data;
2877 dma_addr_t mapping;
2878
2879 for (i = 0; i < MAX_TPA; i++) {
2880 data = __bnxt_alloc_rx_data(bp, &mapping,
2881 GFP_KERNEL);
2882 if (!data)
2883 return -ENOMEM;
2884
2885 rxr->rx_tpa[i].data = data;
2886 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2887 rxr->rx_tpa[i].mapping = mapping;
2888 }
2889 } else {
2890 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2891 return -ENOMEM;
2892 }
2893 }
2894
2895 return 0;
2896 }
2897
2898 static void bnxt_init_cp_rings(struct bnxt *bp)
2899 {
2900 int i, j;
2901
2902 for (i = 0; i < bp->cp_nr_rings; i++) {
2903 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2904 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2905
2906 ring->fw_ring_id = INVALID_HW_RING_ID;
2907 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2908 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2909 for (j = 0; j < 2; j++) {
2910 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2911
2912 if (!cpr2)
2913 continue;
2914
2915 ring = &cpr2->cp_ring_struct;
2916 ring->fw_ring_id = INVALID_HW_RING_ID;
2917 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2918 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2919 }
2920 }
2921 }
2922
2923 static int bnxt_init_rx_rings(struct bnxt *bp)
2924 {
2925 int i, rc = 0;
2926
2927 if (BNXT_RX_PAGE_MODE(bp)) {
2928 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2929 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2930 } else {
2931 bp->rx_offset = BNXT_RX_OFFSET;
2932 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2933 }
2934
2935 for (i = 0; i < bp->rx_nr_rings; i++) {
2936 rc = bnxt_init_one_rx_ring(bp, i);
2937 if (rc)
2938 break;
2939 }
2940
2941 return rc;
2942 }
2943
2944 static int bnxt_init_tx_rings(struct bnxt *bp)
2945 {
2946 u16 i;
2947
2948 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2949 MAX_SKB_FRAGS + 1);
2950
2951 for (i = 0; i < bp->tx_nr_rings; i++) {
2952 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2953 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2954
2955 ring->fw_ring_id = INVALID_HW_RING_ID;
2956 }
2957
2958 return 0;
2959 }
2960
2961 static void bnxt_free_ring_grps(struct bnxt *bp)
2962 {
2963 kfree(bp->grp_info);
2964 bp->grp_info = NULL;
2965 }
2966
2967 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2968 {
2969 int i;
2970
2971 if (irq_re_init) {
2972 bp->grp_info = kcalloc(bp->cp_nr_rings,
2973 sizeof(struct bnxt_ring_grp_info),
2974 GFP_KERNEL);
2975 if (!bp->grp_info)
2976 return -ENOMEM;
2977 }
2978 for (i = 0; i < bp->cp_nr_rings; i++) {
2979 if (irq_re_init)
2980 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2981 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2982 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2983 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2984 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2985 }
2986 return 0;
2987 }
2988
2989 static void bnxt_free_vnics(struct bnxt *bp)
2990 {
2991 kfree(bp->vnic_info);
2992 bp->vnic_info = NULL;
2993 bp->nr_vnics = 0;
2994 }
2995
2996 static int bnxt_alloc_vnics(struct bnxt *bp)
2997 {
2998 int num_vnics = 1;
2999
3000 #ifdef CONFIG_RFS_ACCEL
3001 if (bp->flags & BNXT_FLAG_RFS)
3002 num_vnics += bp->rx_nr_rings;
3003 #endif
3004
3005 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3006 num_vnics++;
3007
3008 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3009 GFP_KERNEL);
3010 if (!bp->vnic_info)
3011 return -ENOMEM;
3012
3013 bp->nr_vnics = num_vnics;
3014 return 0;
3015 }
3016
3017 static void bnxt_init_vnics(struct bnxt *bp)
3018 {
3019 int i;
3020
3021 for (i = 0; i < bp->nr_vnics; i++) {
3022 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3023 int j;
3024
3025 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3026 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3027 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3028
3029 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3030
3031 if (bp->vnic_info[i].rss_hash_key) {
3032 if (i == 0)
3033 prandom_bytes(vnic->rss_hash_key,
3034 HW_HASH_KEY_SIZE);
3035 else
3036 memcpy(vnic->rss_hash_key,
3037 bp->vnic_info[0].rss_hash_key,
3038 HW_HASH_KEY_SIZE);
3039 }
3040 }
3041 }
3042
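/* Return the number of hardware ring pages needed for @ring_size
 * descriptors: ring_size / desc_per_pg plus one, rounded up to the next
 * power of two, with a minimum of one page.
 */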
3043 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3044 {
3045 int pages;
3046
3047 pages = ring_size / desc_per_pg;
3048
3049 if (!pages)
3050 return 1;
3051
3052 pages++;
3053
3054 while (pages & (pages - 1))
3055 pages++;
3056
3057 return pages;
3058 }
3059
3060 void bnxt_set_tpa_flags(struct bnxt *bp)
3061 {
3062 bp->flags &= ~BNXT_FLAG_TPA;
3063 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3064 return;
3065 if (bp->dev->features & NETIF_F_LRO)
3066 bp->flags |= BNXT_FLAG_LRO;
3067 else if (bp->dev->features & NETIF_F_GRO_HW)
3068 bp->flags |= BNXT_FLAG_GRO;
3069 }
3070
3071 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3072 * be set on entry.
3073 */
3074 void bnxt_set_ring_params(struct bnxt *bp)
3075 {
3076 u32 ring_size, rx_size, rx_space;
3077 u32 agg_factor = 0, agg_ring_size = 0;
3078
3079 /* 8 for CRC and VLAN */
3080 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3081
3082 rx_space = rx_size + NET_SKB_PAD +
3083 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3084
3085 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3086 ring_size = bp->rx_ring_size;
3087 bp->rx_agg_ring_size = 0;
3088 bp->rx_agg_nr_pages = 0;
3089
3090 if (bp->flags & BNXT_FLAG_TPA)
3091 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3092
3093 bp->flags &= ~BNXT_FLAG_JUMBO;
3094 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3095 u32 jumbo_factor;
3096
3097 bp->flags |= BNXT_FLAG_JUMBO;
3098 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3099 if (jumbo_factor > agg_factor)
3100 agg_factor = jumbo_factor;
3101 }
3102 agg_ring_size = ring_size * agg_factor;
3103
3104 if (agg_ring_size) {
3105 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3106 RX_DESC_CNT);
3107 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3108 u32 tmp = agg_ring_size;
3109
3110 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3111 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3112 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3113 tmp, agg_ring_size);
3114 }
3115 bp->rx_agg_ring_size = agg_ring_size;
3116 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3117 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3118 rx_space = rx_size + NET_SKB_PAD +
3119 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3120 }
3121
3122 bp->rx_buf_use_size = rx_size;
3123 bp->rx_buf_size = rx_space;
3124
3125 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3126 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3127
3128 ring_size = bp->tx_ring_size;
3129 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3130 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3131
3132 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3133 bp->cp_ring_size = ring_size;
3134
3135 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3136 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3137 bp->cp_nr_pages = MAX_CP_PAGES;
3138 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3139 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3140 ring_size, bp->cp_ring_size);
3141 }
3142 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3143 bp->cp_ring_mask = bp->cp_bit - 1;
3144 }
3145
3146 /* Changing allocation mode of RX rings.
3147 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3148 */
3149 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3150 {
3151 if (page_mode) {
3152 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3153 return -EOPNOTSUPP;
3154 bp->dev->max_mtu =
3155 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3156 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3157 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3158 bp->rx_dir = DMA_BIDIRECTIONAL;
3159 bp->rx_skb_func = bnxt_rx_page_skb;
3160 /* Disable LRO or GRO_HW */
3161 netdev_update_features(bp->dev);
3162 } else {
3163 bp->dev->max_mtu = bp->max_mtu;
3164 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3165 bp->rx_dir = DMA_FROM_DEVICE;
3166 bp->rx_skb_func = bnxt_rx_skb;
3167 }
3168 return 0;
3169 }
3170
3171 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3172 {
3173 int i;
3174 struct bnxt_vnic_info *vnic;
3175 struct pci_dev *pdev = bp->pdev;
3176
3177 if (!bp->vnic_info)
3178 return;
3179
3180 for (i = 0; i < bp->nr_vnics; i++) {
3181 vnic = &bp->vnic_info[i];
3182
3183 kfree(vnic->fw_grp_ids);
3184 vnic->fw_grp_ids = NULL;
3185
3186 kfree(vnic->uc_list);
3187 vnic->uc_list = NULL;
3188
3189 if (vnic->mc_list) {
3190 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3191 vnic->mc_list, vnic->mc_list_mapping);
3192 vnic->mc_list = NULL;
3193 }
3194
3195 if (vnic->rss_table) {
3196 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3197 vnic->rss_table,
3198 vnic->rss_table_dma_addr);
3199 vnic->rss_table = NULL;
3200 }
3201
3202 vnic->rss_hash_key = NULL;
3203 vnic->flags = 0;
3204 }
3205 }
3206
3207 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3208 {
3209 int i, rc = 0, size;
3210 struct bnxt_vnic_info *vnic;
3211 struct pci_dev *pdev = bp->pdev;
3212 int max_rings;
3213
3214 for (i = 0; i < bp->nr_vnics; i++) {
3215 vnic = &bp->vnic_info[i];
3216
3217 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3218 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3219
3220 if (mem_size > 0) {
3221 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3222 if (!vnic->uc_list) {
3223 rc = -ENOMEM;
3224 goto out;
3225 }
3226 }
3227 }
3228
3229 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3230 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3231 vnic->mc_list =
3232 dma_alloc_coherent(&pdev->dev,
3233 vnic->mc_list_size,
3234 &vnic->mc_list_mapping,
3235 GFP_KERNEL);
3236 if (!vnic->mc_list) {
3237 rc = -ENOMEM;
3238 goto out;
3239 }
3240 }
3241
3242 if (bp->flags & BNXT_FLAG_CHIP_P5)
3243 goto vnic_skip_grps;
3244
3245 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3246 max_rings = bp->rx_nr_rings;
3247 else
3248 max_rings = 1;
3249
3250 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3251 if (!vnic->fw_grp_ids) {
3252 rc = -ENOMEM;
3253 goto out;
3254 }
3255 vnic_skip_grps:
3256 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3257 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3258 continue;
3259
3260 /* Allocate rss table and hash key */
3261 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3262 &vnic->rss_table_dma_addr,
3263 GFP_KERNEL);
3264 if (!vnic->rss_table) {
3265 rc = -ENOMEM;
3266 goto out;
3267 }
3268
3269 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3270
3271 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3272 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3273 }
3274 return 0;
3275
3276 out:
3277 return rc;
3278 }
3279
3280 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3281 {
3282 struct pci_dev *pdev = bp->pdev;
3283
3284 if (bp->hwrm_cmd_resp_addr) {
3285 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3286 bp->hwrm_cmd_resp_dma_addr);
3287 bp->hwrm_cmd_resp_addr = NULL;
3288 }
3289
3290 if (bp->hwrm_cmd_kong_resp_addr) {
3291 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3292 bp->hwrm_cmd_kong_resp_addr,
3293 bp->hwrm_cmd_kong_resp_dma_addr);
3294 bp->hwrm_cmd_kong_resp_addr = NULL;
3295 }
3296 }
3297
3298 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3299 {
3300 struct pci_dev *pdev = bp->pdev;
3301
3302 bp->hwrm_cmd_kong_resp_addr =
3303 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3304 &bp->hwrm_cmd_kong_resp_dma_addr,
3305 GFP_KERNEL);
3306 if (!bp->hwrm_cmd_kong_resp_addr)
3307 return -ENOMEM;
3308
3309 return 0;
3310 }
3311
3312 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3313 {
3314 struct pci_dev *pdev = bp->pdev;
3315
3316 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3317 &bp->hwrm_cmd_resp_dma_addr,
3318 GFP_KERNEL);
3319 if (!bp->hwrm_cmd_resp_addr)
3320 return -ENOMEM;
3321
3322 return 0;
3323 }
3324
3325 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3326 {
3327 if (bp->hwrm_short_cmd_req_addr) {
3328 struct pci_dev *pdev = bp->pdev;
3329
3330 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3331 bp->hwrm_short_cmd_req_addr,
3332 bp->hwrm_short_cmd_req_dma_addr);
3333 bp->hwrm_short_cmd_req_addr = NULL;
3334 }
3335 }
3336
3337 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3338 {
3339 struct pci_dev *pdev = bp->pdev;
3340
3341 bp->hwrm_short_cmd_req_addr =
3342 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3343 &bp->hwrm_short_cmd_req_dma_addr,
3344 GFP_KERNEL);
3345 if (!bp->hwrm_short_cmd_req_addr)
3346 return -ENOMEM;
3347
3348 return 0;
3349 }
3350
3351 static void bnxt_free_port_stats(struct bnxt *bp)
3352 {
3353 struct pci_dev *pdev = bp->pdev;
3354
3355 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3356 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3357
3358 if (bp->hw_rx_port_stats) {
3359 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3360 bp->hw_rx_port_stats,
3361 bp->hw_rx_port_stats_map);
3362 bp->hw_rx_port_stats = NULL;
3363 }
3364
3365 if (bp->hw_tx_port_stats_ext) {
3366 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3367 bp->hw_tx_port_stats_ext,
3368 bp->hw_tx_port_stats_ext_map);
3369 bp->hw_tx_port_stats_ext = NULL;
3370 }
3371
3372 if (bp->hw_rx_port_stats_ext) {
3373 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3374 bp->hw_rx_port_stats_ext,
3375 bp->hw_rx_port_stats_ext_map);
3376 bp->hw_rx_port_stats_ext = NULL;
3377 }
3378 }
3379
3380 static void bnxt_free_ring_stats(struct bnxt *bp)
3381 {
3382 struct pci_dev *pdev = bp->pdev;
3383 int size, i;
3384
3385 if (!bp->bnapi)
3386 return;
3387
3388 size = sizeof(struct ctx_hw_stats);
3389
3390 for (i = 0; i < bp->cp_nr_rings; i++) {
3391 struct bnxt_napi *bnapi = bp->bnapi[i];
3392 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3393
3394 if (cpr->hw_stats) {
3395 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3396 cpr->hw_stats_map);
3397 cpr->hw_stats = NULL;
3398 }
3399 }
3400 }
3401
3402 static int bnxt_alloc_stats(struct bnxt *bp)
3403 {
3404 u32 size, i;
3405 struct pci_dev *pdev = bp->pdev;
3406
3407 size = sizeof(struct ctx_hw_stats);
3408
3409 for (i = 0; i < bp->cp_nr_rings; i++) {
3410 struct bnxt_napi *bnapi = bp->bnapi[i];
3411 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3412
3413 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3414 &cpr->hw_stats_map,
3415 GFP_KERNEL);
3416 if (!cpr->hw_stats)
3417 return -ENOMEM;
3418
3419 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3420 }
3421
3422 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3423 if (bp->hw_rx_port_stats)
3424 goto alloc_ext_stats;
3425
3426 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3427 sizeof(struct tx_port_stats) + 1024;
3428
3429 bp->hw_rx_port_stats =
3430 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3431 &bp->hw_rx_port_stats_map,
3432 GFP_KERNEL);
3433 if (!bp->hw_rx_port_stats)
3434 return -ENOMEM;
3435
3436 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3437 512;
3438 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3439 sizeof(struct rx_port_stats) + 512;
3440 bp->flags |= BNXT_FLAG_PORT_STATS;
3441
3442 alloc_ext_stats:
3443 /* Display extended statistics only if FW supports it */
3444 if (bp->hwrm_spec_code < 0x10804 ||
3445 bp->hwrm_spec_code == 0x10900)
3446 return 0;
3447
3448 if (bp->hw_rx_port_stats_ext)
3449 goto alloc_tx_ext_stats;
3450
3451 bp->hw_rx_port_stats_ext =
3452 dma_zalloc_coherent(&pdev->dev,
3453 sizeof(struct rx_port_stats_ext),
3454 &bp->hw_rx_port_stats_ext_map,
3455 GFP_KERNEL);
3456 if (!bp->hw_rx_port_stats_ext)
3457 return 0;
3458
3459 alloc_tx_ext_stats:
3460 if (bp->hw_tx_port_stats_ext)
3461 return 0;
3462
3463 if (bp->hwrm_spec_code >= 0x10902) {
3464 bp->hw_tx_port_stats_ext =
3465 dma_zalloc_coherent(&pdev->dev,
3466 sizeof(struct tx_port_stats_ext),
3467 &bp->hw_tx_port_stats_ext_map,
3468 GFP_KERNEL);
3469 }
3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3471 }
3472 return 0;
3473 }
3474
3475 static void bnxt_clear_ring_indices(struct bnxt *bp)
3476 {
3477 int i;
3478
3479 if (!bp->bnapi)
3480 return;
3481
3482 for (i = 0; i < bp->cp_nr_rings; i++) {
3483 struct bnxt_napi *bnapi = bp->bnapi[i];
3484 struct bnxt_cp_ring_info *cpr;
3485 struct bnxt_rx_ring_info *rxr;
3486 struct bnxt_tx_ring_info *txr;
3487
3488 if (!bnapi)
3489 continue;
3490
3491 cpr = &bnapi->cp_ring;
3492 cpr->cp_raw_cons = 0;
3493
3494 txr = bnapi->tx_ring;
3495 if (txr) {
3496 txr->tx_prod = 0;
3497 txr->tx_cons = 0;
3498 }
3499
3500 rxr = bnapi->rx_ring;
3501 if (rxr) {
3502 rxr->rx_prod = 0;
3503 rxr->rx_agg_prod = 0;
3504 rxr->rx_sw_agg_prod = 0;
3505 rxr->rx_next_cons = 0;
3506 }
3507 }
3508 }
3509
3510 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3511 {
3512 #ifdef CONFIG_RFS_ACCEL
3513 int i;
3514
3515 	/* We are under rtnl_lock and all our NAPIs have been disabled,
3516 	 * so it is safe to delete the hash table.
3517 	 */
3518 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3519 struct hlist_head *head;
3520 struct hlist_node *tmp;
3521 struct bnxt_ntuple_filter *fltr;
3522
3523 head = &bp->ntp_fltr_hash_tbl[i];
3524 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3525 hlist_del(&fltr->hash);
3526 kfree(fltr);
3527 }
3528 }
3529 if (irq_reinit) {
3530 kfree(bp->ntp_fltr_bmap);
3531 bp->ntp_fltr_bmap = NULL;
3532 }
3533 bp->ntp_fltr_count = 0;
3534 #endif
3535 }
3536
3537 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3538 {
3539 #ifdef CONFIG_RFS_ACCEL
3540 int i, rc = 0;
3541
3542 if (!(bp->flags & BNXT_FLAG_RFS))
3543 return 0;
3544
3545 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3546 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3547
3548 bp->ntp_fltr_count = 0;
3549 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3550 sizeof(long),
3551 GFP_KERNEL);
3552
3553 if (!bp->ntp_fltr_bmap)
3554 rc = -ENOMEM;
3555
3556 return rc;
3557 #else
3558 return 0;
3559 #endif
3560 }
3561
3562 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3563 {
3564 bnxt_free_vnic_attributes(bp);
3565 bnxt_free_tx_rings(bp);
3566 bnxt_free_rx_rings(bp);
3567 bnxt_free_cp_rings(bp);
3568 bnxt_free_ntp_fltrs(bp, irq_re_init);
3569 if (irq_re_init) {
3570 bnxt_free_ring_stats(bp);
3571 bnxt_free_ring_grps(bp);
3572 bnxt_free_vnics(bp);
3573 kfree(bp->tx_ring_map);
3574 bp->tx_ring_map = NULL;
3575 kfree(bp->tx_ring);
3576 bp->tx_ring = NULL;
3577 kfree(bp->rx_ring);
3578 bp->rx_ring = NULL;
3579 kfree(bp->bnapi);
3580 bp->bnapi = NULL;
3581 } else {
3582 bnxt_clear_ring_indices(bp);
3583 }
3584 }
3585
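/* Allocate all per-ring software and DMA memory.  With @irq_re_init set,
 * the bnapi array, the RX/TX ring info arrays, statistics, the ntuple
 * filter table and the VNIC array are (re)allocated as well; otherwise
 * only the ring descriptor memory and VNIC attributes are set up again.
 * Any failure unwinds through bnxt_free_mem().
 */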
3586 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3587 {
3588 int i, j, rc, size, arr_size;
3589 void *bnapi;
3590
3591 if (irq_re_init) {
3592 /* Allocate bnapi mem pointer array and mem block for
3593 * all queues
3594 */
3595 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3596 bp->cp_nr_rings);
3597 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3598 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3599 if (!bnapi)
3600 return -ENOMEM;
3601
3602 bp->bnapi = bnapi;
3603 bnapi += arr_size;
3604 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3605 bp->bnapi[i] = bnapi;
3606 bp->bnapi[i]->index = i;
3607 bp->bnapi[i]->bp = bp;
3608 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3609 struct bnxt_cp_ring_info *cpr =
3610 &bp->bnapi[i]->cp_ring;
3611
3612 cpr->cp_ring_struct.ring_mem.flags =
3613 BNXT_RMEM_RING_PTE_FLAG;
3614 }
3615 }
3616
3617 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3618 sizeof(struct bnxt_rx_ring_info),
3619 GFP_KERNEL);
3620 if (!bp->rx_ring)
3621 return -ENOMEM;
3622
3623 for (i = 0; i < bp->rx_nr_rings; i++) {
3624 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3625
3626 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3627 rxr->rx_ring_struct.ring_mem.flags =
3628 BNXT_RMEM_RING_PTE_FLAG;
3629 rxr->rx_agg_ring_struct.ring_mem.flags =
3630 BNXT_RMEM_RING_PTE_FLAG;
3631 }
3632 rxr->bnapi = bp->bnapi[i];
3633 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3634 }
3635
3636 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3637 sizeof(struct bnxt_tx_ring_info),
3638 GFP_KERNEL);
3639 if (!bp->tx_ring)
3640 return -ENOMEM;
3641
3642 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3643 GFP_KERNEL);
3644
3645 if (!bp->tx_ring_map)
3646 return -ENOMEM;
3647
3648 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3649 j = 0;
3650 else
3651 j = bp->rx_nr_rings;
3652
3653 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3654 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3655
3656 if (bp->flags & BNXT_FLAG_CHIP_P5)
3657 txr->tx_ring_struct.ring_mem.flags =
3658 BNXT_RMEM_RING_PTE_FLAG;
3659 txr->bnapi = bp->bnapi[j];
3660 bp->bnapi[j]->tx_ring = txr;
3661 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3662 if (i >= bp->tx_nr_rings_xdp) {
3663 txr->txq_index = i - bp->tx_nr_rings_xdp;
3664 bp->bnapi[j]->tx_int = bnxt_tx_int;
3665 } else {
3666 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3667 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3668 }
3669 }
3670
3671 rc = bnxt_alloc_stats(bp);
3672 if (rc)
3673 goto alloc_mem_err;
3674
3675 rc = bnxt_alloc_ntp_fltrs(bp);
3676 if (rc)
3677 goto alloc_mem_err;
3678
3679 rc = bnxt_alloc_vnics(bp);
3680 if (rc)
3681 goto alloc_mem_err;
3682 }
3683
3684 bnxt_init_ring_struct(bp);
3685
3686 rc = bnxt_alloc_rx_rings(bp);
3687 if (rc)
3688 goto alloc_mem_err;
3689
3690 rc = bnxt_alloc_tx_rings(bp);
3691 if (rc)
3692 goto alloc_mem_err;
3693
3694 rc = bnxt_alloc_cp_rings(bp);
3695 if (rc)
3696 goto alloc_mem_err;
3697
3698 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3699 BNXT_VNIC_UCAST_FLAG;
3700 rc = bnxt_alloc_vnic_attributes(bp);
3701 if (rc)
3702 goto alloc_mem_err;
3703 return 0;
3704
3705 alloc_mem_err:
3706 bnxt_free_mem(bp, true);
3707 return rc;
3708 }
3709
3710 static void bnxt_disable_int(struct bnxt *bp)
3711 {
3712 int i;
3713
3714 if (!bp->bnapi)
3715 return;
3716
3717 for (i = 0; i < bp->cp_nr_rings; i++) {
3718 struct bnxt_napi *bnapi = bp->bnapi[i];
3719 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3720 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3721
3722 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3723 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3724 }
3725 }
3726
3727 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3728 {
3729 struct bnxt_napi *bnapi = bp->bnapi[n];
3730 struct bnxt_cp_ring_info *cpr;
3731
3732 cpr = &bnapi->cp_ring;
3733 return cpr->cp_ring_struct.map_idx;
3734 }
3735
3736 static void bnxt_disable_int_sync(struct bnxt *bp)
3737 {
3738 int i;
3739
3740 atomic_inc(&bp->intr_sem);
3741
3742 bnxt_disable_int(bp);
3743 for (i = 0; i < bp->cp_nr_rings; i++) {
3744 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3745
3746 synchronize_irq(bp->irq_tbl[map_idx].vector);
3747 }
3748 }
3749
3750 static void bnxt_enable_int(struct bnxt *bp)
3751 {
3752 int i;
3753
3754 atomic_set(&bp->intr_sem, 0);
3755 for (i = 0; i < bp->cp_nr_rings; i++) {
3756 struct bnxt_napi *bnapi = bp->bnapi[i];
3757 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3758
3759 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
3760 }
3761 }
3762
3763 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3764 u16 cmpl_ring, u16 target_id)
3765 {
3766 struct input *req = request;
3767
3768 req->req_type = cpu_to_le16(req_type);
3769 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3770 req->target_id = cpu_to_le16(target_id);
3771 if (bnxt_kong_hwrm_message(bp, req))
3772 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
3773 else
3774 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3775 }
3776
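/* Send one HWRM request to the firmware and wait for its response.  The
 * request (or its short-format descriptor, when the short command
 * capability is used or the request exceeds BNXT_HWRM_MAX_REQ_LEN) is
 * copied into the ChiMP or KONG communication window in BAR0 and the
 * channel doorbell is rung.  Completion is detected either through the
 * HWRM completion interrupt, when a completion ring is specified, or by
 * polling the response length and the trailing valid byte in the DMA
 * response buffer.  Callers serialize access through bp->hwrm_cmd_lock
 * (see hwrm_send_message()).
 */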
3777 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3778 int timeout, bool silent)
3779 {
3780 int i, intr_process, rc, tmo_count;
3781 struct input *req = msg;
3782 u32 *data = msg;
3783 __le32 *resp_len;
3784 u8 *valid;
3785 u16 cp_ring_id, len = 0;
3786 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3787 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3788 struct hwrm_short_input short_input = {0};
3789 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
3790 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
3791 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
3792 u16 dst = BNXT_HWRM_CHNL_CHIMP;
3793
3794 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3795 if (msg_len > bp->hwrm_max_ext_req_len ||
3796 !bp->hwrm_short_cmd_req_addr)
3797 return -EINVAL;
3798 }
3799
3800 if (bnxt_hwrm_kong_chnl(bp, req)) {
3801 dst = BNXT_HWRM_CHNL_KONG;
3802 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3803 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3804 resp = bp->hwrm_cmd_kong_resp_addr;
3805 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3806 }
3807
3808 memset(resp, 0, PAGE_SIZE);
3809 cp_ring_id = le16_to_cpu(req->cmpl_ring);
3810 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3811
3812 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
3813 /* currently supports only one outstanding message */
3814 if (intr_process)
3815 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3816
3817 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3818 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3819 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3820 u16 max_msg_len;
3821
3822 		/* Set the boundary for the maximum extended request length for
3823 		 * the short cmd format. If passed up from the device, use the
3824 		 * max supported internal req length.
3825 		 */
3826 max_msg_len = bp->hwrm_max_ext_req_len;
3827
3828 memcpy(short_cmd_req, req, msg_len);
3829 if (msg_len < max_msg_len)
3830 memset(short_cmd_req + msg_len, 0,
3831 max_msg_len - msg_len);
3832
3833 short_input.req_type = req->req_type;
3834 short_input.signature =
3835 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3836 short_input.size = cpu_to_le16(msg_len);
3837 short_input.req_addr =
3838 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3839
3840 data = (u32 *)&short_input;
3841 msg_len = sizeof(short_input);
3842
3843 /* Sync memory write before updating doorbell */
3844 wmb();
3845
3846 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3847 }
3848
3849 /* Write request msg to hwrm channel */
3850 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
3851
3852 for (i = msg_len; i < max_req_len; i += 4)
3853 writel(0, bp->bar0 + bar_offset + i);
3854
3855 /* Ring channel doorbell */
3856 writel(1, bp->bar0 + doorbell_offset);
3857
3858 if (!timeout)
3859 timeout = DFLT_HWRM_CMD_TIMEOUT;
3860 /* convert timeout to usec */
3861 timeout *= 1000;
3862
3863 i = 0;
3864 /* Short timeout for the first few iterations:
3865 * number of loops = number of loops for short timeout +
3866 * number of loops for standard timeout.
3867 */
3868 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3869 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3870 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3871 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3872
3873 if (intr_process) {
3874 u16 seq_id = bp->hwrm_intr_seq_id;
3875
3876 /* Wait until hwrm response cmpl interrupt is processed */
3877 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
3878 i++ < tmo_count) {
3879 /* on first few passes, just barely sleep */
3880 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3881 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3882 HWRM_SHORT_MAX_TIMEOUT);
3883 else
3884 usleep_range(HWRM_MIN_TIMEOUT,
3885 HWRM_MAX_TIMEOUT);
3886 }
3887
3888 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
3889 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3890 le16_to_cpu(req->req_type));
3891 return -1;
3892 }
3893 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3894 HWRM_RESP_LEN_SFT;
3895 valid = resp_addr + len - 1;
3896 } else {
3897 int j;
3898
3899 /* Check if response len is updated */
3900 for (i = 0; i < tmo_count; i++) {
3901 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3902 HWRM_RESP_LEN_SFT;
3903 if (len)
3904 break;
3905 /* on first few passes, just barely sleep */
3906 if (i < DFLT_HWRM_CMD_TIMEOUT)
3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3908 HWRM_SHORT_MAX_TIMEOUT);
3909 else
3910 usleep_range(HWRM_MIN_TIMEOUT,
3911 HWRM_MAX_TIMEOUT);
3912 }
3913
3914 if (i >= tmo_count) {
3915 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3916 HWRM_TOTAL_TIMEOUT(i),
3917 le16_to_cpu(req->req_type),
3918 le16_to_cpu(req->seq_id), len);
3919 return -1;
3920 }
3921
3922 /* Last byte of resp contains valid bit */
3923 valid = resp_addr + len - 1;
3924 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3925 /* make sure we read from updated DMA memory */
3926 dma_rmb();
3927 if (*valid)
3928 break;
3929 udelay(1);
3930 }
3931
3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
3933 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3934 HWRM_TOTAL_TIMEOUT(i),
3935 le16_to_cpu(req->req_type),
3936 le16_to_cpu(req->seq_id), len, *valid);
3937 return -1;
3938 }
3939 }
3940
3941 	/* Zero the valid bit for compatibility. The valid bit in an older
3942 	 * spec may become a new field in a newer spec. We must make sure
3943 	 * that a new field not implemented by the old spec will read as zero.
3944 	 */
3945 *valid = 0;
3946 rc = le16_to_cpu(resp->error_code);
3947 if (rc && !silent)
3948 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3949 le16_to_cpu(resp->req_type),
3950 le16_to_cpu(resp->seq_id), rc);
3951 return rc;
3952 }
3953
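/* Wrappers around bnxt_hwrm_do_send_msg(). The underscore-prefixed
 * variants require the caller to hold bp->hwrm_cmd_lock (needed while
 * the shared response buffer is examined); the plain variants take the
 * lock themselves. The _silent variants suppress the error log when
 * firmware returns a non-zero error code.
 */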
3954 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3955 {
3956 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3957 }
3958
3959 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3960 int timeout)
3961 {
3962 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3963 }
3964
3965 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3966 {
3967 int rc;
3968
3969 mutex_lock(&bp->hwrm_cmd_lock);
3970 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3971 mutex_unlock(&bp->hwrm_cmd_lock);
3972 return rc;
3973 }
3974
3975 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3976 int timeout)
3977 {
3978 int rc;
3979
3980 mutex_lock(&bp->hwrm_cmd_lock);
3981 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3982 mutex_unlock(&bp->hwrm_cmd_lock);
3983 return rc;
3984 }
3985
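/* Register the async events that the driver wants forwarded by
 * firmware. The driver's default list (bnxt_async_events_arr) is
 * combined with the optional caller-supplied bitmap and sent via
 * HWRM_FUNC_DRV_RGTR with the ASYNC_EVENT_FWD enable bit set.
 */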
3986 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3987 int bmap_size)
3988 {
3989 struct hwrm_func_drv_rgtr_input req = {0};
3990 DECLARE_BITMAP(async_events_bmap, 256);
3991 u32 *events = (u32 *)async_events_bmap;
3992 int i;
3993
3994 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3995
3996 req.enables =
3997 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3998
3999 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4000 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4001 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4002
4003 if (bmap && bmap_size) {
4004 for (i = 0; i < bmap_size; i++) {
4005 if (test_bit(i, bmap))
4006 __set_bit(i, async_events_bmap);
4007 }
4008 }
4009
4010 for (i = 0; i < 8; i++)
4011 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4012
4013 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4014 }
4015
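/* Register the driver with firmware: OS type and driver version. On
 * the PF, also program vf_req_fwd with the HWRM commands listed in
 * bnxt_vf_req_snif so that those VF requests are forwarded to the PF
 * driver, and record whether firmware supports IF_CHANGE notification.
 */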
4016 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4017 {
4018 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4019 struct hwrm_func_drv_rgtr_input req = {0};
4020 int rc;
4021
4022 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4023
4024 req.enables =
4025 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4026 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4027
4028 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4029 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4030 req.ver_maj_8b = DRV_VER_MAJ;
4031 req.ver_min_8b = DRV_VER_MIN;
4032 req.ver_upd_8b = DRV_VER_UPD;
4033 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4034 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4035 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4036
4037 if (BNXT_PF(bp)) {
4038 u32 data[8];
4039 int i;
4040
4041 memset(data, 0, sizeof(data));
4042 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4043 u16 cmd = bnxt_vf_req_snif[i];
4044 unsigned int bit, idx;
4045
4046 idx = cmd / 32;
4047 bit = cmd % 32;
4048 data[idx] |= 1 << bit;
4049 }
4050
4051 for (i = 0; i < 8; i++)
4052 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4053
4054 req.enables |=
4055 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4056 }
4057
4058 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4059 req.flags |= cpu_to_le32(
4060 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4061
4062 mutex_lock(&bp->hwrm_cmd_lock);
4063 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4064 if (rc)
4065 rc = -EIO;
4066 else if (resp->flags &
4067 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4068 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4069 mutex_unlock(&bp->hwrm_cmd_lock);
4070 return rc;
4071 }
4072
4073 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4074 {
4075 struct hwrm_func_drv_unrgtr_input req = {0};
4076
4077 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4078 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4079 }
4080
4081 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4082 {
4083 u32 rc = 0;
4084 struct hwrm_tunnel_dst_port_free_input req = {0};
4085
4086 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4087 req.tunnel_type = tunnel_type;
4088
4089 switch (tunnel_type) {
4090 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4091 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4092 break;
4093 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4094 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4095 break;
4096 default:
4097 break;
4098 }
4099
4100 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4101 if (rc)
4102 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4103 rc);
4104 return rc;
4105 }
4106
4107 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4108 u8 tunnel_type)
4109 {
4110 u32 rc = 0;
4111 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4112 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4113
4114 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4115
4116 req.tunnel_type = tunnel_type;
4117 req.tunnel_dst_port_val = port;
4118
4119 mutex_lock(&bp->hwrm_cmd_lock);
4120 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4121 if (rc) {
4122 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4123 rc);
4124 goto err_out;
4125 }
4126
4127 switch (tunnel_type) {
4128 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4129 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4130 break;
4131 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4132 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4133 break;
4134 default:
4135 break;
4136 }
4137
4138 err_out:
4139 mutex_unlock(&bp->hwrm_cmd_lock);
4140 return rc;
4141 }
4142
4143 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4144 {
4145 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4146 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4147
4148 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4149 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4150
4151 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4152 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4153 req.mask = cpu_to_le32(vnic->rx_mask);
4154 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4155 }
4156
4157 #ifdef CONFIG_RFS_ACCEL
4158 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4159 struct bnxt_ntuple_filter *fltr)
4160 {
4161 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4162
4163 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4164 req.ntuple_filter_id = fltr->filter_id;
4165 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4166 }
4167
4168 #define BNXT_NTP_FLTR_FLAGS \
4169 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4170 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4171 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4172 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4173 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4174 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4175 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4176 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4177 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4178 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4179 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4180 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4181 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4182 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4183
4184 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4185 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4186
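/* Allocate an ntuple (aRFS) filter that matches the flow described by
 * fltr->fkeys exactly (all-ones address and port masks) and steers it
 * to the VNIC of the target RX queue (fltr->rxq + 1). The firmware
 * filter ID is saved so the filter can be freed later.
 */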
4187 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4188 struct bnxt_ntuple_filter *fltr)
4189 {
4190 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4191 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4192 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4193 struct flow_keys *keys = &fltr->fkeys;
4194 int rc = 0;
4195
4196 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4197 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4198
4199 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4200
4201 req.ethertype = htons(ETH_P_IP);
4202 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4203 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4204 req.ip_protocol = keys->basic.ip_proto;
4205
4206 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4207 int i;
4208
4209 req.ethertype = htons(ETH_P_IPV6);
4210 req.ip_addr_type =
4211 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4212 *(struct in6_addr *)&req.src_ipaddr[0] =
4213 keys->addrs.v6addrs.src;
4214 *(struct in6_addr *)&req.dst_ipaddr[0] =
4215 keys->addrs.v6addrs.dst;
4216 for (i = 0; i < 4; i++) {
4217 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4218 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4219 }
4220 } else {
4221 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4222 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4223 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4224 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4225 }
4226 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4227 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4228 req.tunnel_type =
4229 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4230 }
4231
4232 req.src_port = keys->ports.src;
4233 req.src_port_mask = cpu_to_be16(0xffff);
4234 req.dst_port = keys->ports.dst;
4235 req.dst_port_mask = cpu_to_be16(0xffff);
4236
4237 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4238 mutex_lock(&bp->hwrm_cmd_lock);
4239 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4240 if (!rc) {
4241 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4242 fltr->filter_id = resp->ntuple_filter_id;
4243 }
4244 mutex_unlock(&bp->hwrm_cmd_lock);
4245 return rc;
4246 }
4247 #endif
4248
4249 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4250 u8 *mac_addr)
4251 {
4252 u32 rc = 0;
4253 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4254 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4255
4256 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4257 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4258 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4259 req.flags |=
4260 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4261 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4262 req.enables =
4263 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4264 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4265 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4266 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4267 req.l2_addr_mask[0] = 0xff;
4268 req.l2_addr_mask[1] = 0xff;
4269 req.l2_addr_mask[2] = 0xff;
4270 req.l2_addr_mask[3] = 0xff;
4271 req.l2_addr_mask[4] = 0xff;
4272 req.l2_addr_mask[5] = 0xff;
4273
4274 mutex_lock(&bp->hwrm_cmd_lock);
4275 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4276 if (!rc)
4277 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4278 resp->l2_filter_id;
4279 mutex_unlock(&bp->hwrm_cmd_lock);
4280 return rc;
4281 }
4282
4283 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4284 {
4285 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4286 int rc = 0;
4287
4288 /* Any associated ntuple filters will also be cleared by firmware. */
4289 mutex_lock(&bp->hwrm_cmd_lock);
4290 for (i = 0; i < num_of_vnics; i++) {
4291 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4292
4293 for (j = 0; j < vnic->uc_filter_count; j++) {
4294 struct hwrm_cfa_l2_filter_free_input req = {0};
4295
4296 bnxt_hwrm_cmd_hdr_init(bp, &req,
4297 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4298
4299 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4300
4301 rc = _hwrm_send_message(bp, &req, sizeof(req),
4302 HWRM_CMD_TIMEOUT);
4303 }
4304 vnic->uc_filter_count = 0;
4305 }
4306 mutex_unlock(&bp->hwrm_cmd_lock);
4307
4308 return rc;
4309 }
4310
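/* Configure hardware TPA (LRO/GRO aggregation) for a VNIC. When
 * enabling, the maximum number of aggregation segments is derived from
 * the MSS and BNXT_RX_PAGE_SIZE so that an aggregated packet still
 * fits in MAX_SKB_FRAGS buffers, and is programmed in log2 units.
 */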
4311 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4312 {
4313 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4314 struct hwrm_vnic_tpa_cfg_input req = {0};
4315
4316 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4317 return 0;
4318
4319 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4320
4321 if (tpa_flags) {
4322 u16 mss = bp->dev->mtu - 40;
4323 u32 nsegs, n, segs = 0, flags;
4324
4325 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4326 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4327 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4328 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4329 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4330 if (tpa_flags & BNXT_FLAG_GRO)
4331 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4332
4333 req.flags = cpu_to_le32(flags);
4334
4335 req.enables =
4336 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4337 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4338 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4339
4340 /* The number of aggregation segments is expressed in log2 units,
4341  * and the first packet is not counted in these units.
4342  */
4343 if (mss <= BNXT_RX_PAGE_SIZE) {
4344 n = BNXT_RX_PAGE_SIZE / mss;
4345 nsegs = (MAX_SKB_FRAGS - 1) * n;
4346 } else {
4347 n = mss / BNXT_RX_PAGE_SIZE;
4348 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4349 n++;
4350 nsegs = (MAX_SKB_FRAGS - n) / n;
4351 }
4352
4353 segs = ilog2(nsegs);
4354 req.max_agg_segs = cpu_to_le16(segs);
4355 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
4356
4357 req.min_agg_len = cpu_to_le32(512);
4358 }
4359 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4360
4361 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4362 }
4363
4364 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4365 {
4366 struct bnxt_ring_grp_info *grp_info;
4367
4368 grp_info = &bp->grp_info[ring->grp_idx];
4369 return grp_info->cp_fw_ring_id;
4370 }
4371
4372 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4373 {
4374 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4375 struct bnxt_napi *bnapi = rxr->bnapi;
4376 struct bnxt_cp_ring_info *cpr;
4377
4378 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4379 return cpr->cp_ring_struct.fw_ring_id;
4380 } else {
4381 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4382 }
4383 }
4384
4385 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4386 {
4387 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4388 struct bnxt_napi *bnapi = txr->bnapi;
4389 struct bnxt_cp_ring_info *cpr;
4390
4391 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4392 return cpr->cp_ring_struct.fw_ring_id;
4393 } else {
4394 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4395 }
4396 }
4397
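/* Program RSS for non-P5 chips: hash type, hash key, and an
 * indirection table of HW_HASH_INDEX_SIZE entries filled round-robin
 * with the VNIC's ring group IDs.
 */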
4398 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4399 {
4400 u32 i, j, max_rings;
4401 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4402 struct hwrm_vnic_rss_cfg_input req = {0};
4403
4404 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4405 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4406 return 0;
4407
4408 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4409 if (set_rss) {
4410 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4411 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4412 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4413 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4414 max_rings = bp->rx_nr_rings - 1;
4415 else
4416 max_rings = bp->rx_nr_rings;
4417 } else {
4418 max_rings = 1;
4419 }
4420
4421 /* Fill the RSS indirection table with ring group ids */
4422 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4423 if (j == max_rings)
4424 j = 0;
4425 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4426 }
4427
4428 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4429 req.hash_key_tbl_addr =
4430 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4431 }
4432 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4433 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4434 }
4435
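/* Program RSS for P5 chips. The indirection table consists of
 * (RX ring ID, completion ring ID) pairs, 64 pairs per RSS context,
 * and one HWRM_VNIC_RSS_CFG message is sent per context.
 */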
4436 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4437 {
4438 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4439 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4440 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4441 struct hwrm_vnic_rss_cfg_input req = {0};
4442
4443 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4444 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4445 if (!set_rss) {
4446 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4447 return 0;
4448 }
4449 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4450 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4451 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4452 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4453 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4454 for (i = 0, k = 0; i < nr_ctxs; i++) {
4455 __le16 *ring_tbl = vnic->rss_table;
4456 int rc;
4457
4458 req.ring_table_pair_index = i;
4459 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4460 for (j = 0; j < 64; j++) {
4461 u16 ring_id;
4462
4463 ring_id = rxr->rx_ring_struct.fw_ring_id;
4464 *ring_tbl++ = cpu_to_le16(ring_id);
4465 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4466 *ring_tbl++ = cpu_to_le16(ring_id);
4467 rxr++;
4468 k++;
4469 if (k == max_rings) {
4470 k = 0;
4471 rxr = &bp->rx_ring[0];
4472 }
4473 }
4474 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4475 if (rc)
4476 return -EIO;
4477 }
4478 return 0;
4479 }
4480
4481 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4482 {
4483 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4484 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4485
4486 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4487 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4488 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4489 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4490 req.enables =
4491 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4492 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4493 /* thresholds not implemented in firmware yet */
4494 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4495 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4496 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4497 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4498 }
4499
4500 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4501 u16 ctx_idx)
4502 {
4503 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4504
4505 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4506 req.rss_cos_lb_ctx_id =
4507 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4508
4509 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4510 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4511 }
4512
4513 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4514 {
4515 int i, j;
4516
4517 for (i = 0; i < bp->nr_vnics; i++) {
4518 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4519
4520 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4521 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4522 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4523 }
4524 }
4525 bp->rsscos_nr_ctxs = 0;
4526 }
4527
4528 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4529 {
4530 int rc;
4531 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4532 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4533 bp->hwrm_cmd_resp_addr;
4534
4535 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4536 -1);
4537
4538 mutex_lock(&bp->hwrm_cmd_lock);
4539 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4540 if (!rc)
4541 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4542 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4543 mutex_unlock(&bp->hwrm_cmd_lock);
4544
4545 return rc;
4546 }
4547
4548 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4549 {
4550 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4551 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4552 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4553 }
4554
4555 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4556 {
4557 unsigned int ring = 0, grp_idx;
4558 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4559 struct hwrm_vnic_cfg_input req = {0};
4560 u16 def_vlan = 0;
4561
4562 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4563
4564 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4565 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4566
4567 req.default_rx_ring_id =
4568 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4569 req.default_cmpl_ring_id =
4570 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4571 req.enables =
4572 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4573 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4574 goto vnic_mru;
4575 }
4576 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4577 /* Only RSS is supported for now; COS and LB are TBD */
4578 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4579 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4580 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4581 VNIC_CFG_REQ_ENABLES_MRU);
4582 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4583 req.rss_rule =
4584 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4585 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4586 VNIC_CFG_REQ_ENABLES_MRU);
4587 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4588 } else {
4589 req.rss_rule = cpu_to_le16(0xffff);
4590 }
4591
4592 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4593 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4594 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4595 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4596 } else {
4597 req.cos_rule = cpu_to_le16(0xffff);
4598 }
4599
4600 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4601 ring = 0;
4602 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4603 ring = vnic_id - 1;
4604 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4605 ring = bp->rx_nr_rings - 1;
4606
4607 grp_idx = bp->rx_ring[ring].bnapi->index;
4608 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4609 req.lb_rule = cpu_to_le16(0xffff);
4610 vnic_mru:
4611 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4612 VLAN_HLEN);
4613
4614 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4615 #ifdef CONFIG_BNXT_SRIOV
4616 if (BNXT_VF(bp))
4617 def_vlan = bp->vf.vlan;
4618 #endif
4619 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4620 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4621 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4622 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4623
4624 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4625 }
4626
4627 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4628 {
4629 u32 rc = 0;
4630
4631 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4632 struct hwrm_vnic_free_input req = {0};
4633
4634 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4635 req.vnic_id =
4636 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4637
4638 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4639 if (rc)
4640 return rc;
4641 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4642 }
4643 return rc;
4644 }
4645
4646 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4647 {
4648 u16 i;
4649
4650 for (i = 0; i < bp->nr_vnics; i++)
4651 bnxt_hwrm_vnic_free_one(bp, i);
4652 }
4653
4654 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4655 unsigned int start_rx_ring_idx,
4656 unsigned int nr_rings)
4657 {
4658 int rc = 0;
4659 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4660 struct hwrm_vnic_alloc_input req = {0};
4661 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4662 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4663
4664 if (bp->flags & BNXT_FLAG_CHIP_P5)
4665 goto vnic_no_ring_grps;
4666
4667 /* map ring groups to this vnic */
4668 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4669 grp_idx = bp->rx_ring[i].bnapi->index;
4670 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4671 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4672 j, nr_rings);
4673 break;
4674 }
4675 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
4676 }
4677
4678 vnic_no_ring_grps:
4679 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4680 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
4681 if (vnic_id == 0)
4682 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4683
4684 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4685
4686 mutex_lock(&bp->hwrm_cmd_lock);
4687 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4688 if (!rc)
4689 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
4690 mutex_unlock(&bp->hwrm_cmd_lock);
4691 return rc;
4692 }
4693
4694 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4695 {
4696 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4697 struct hwrm_vnic_qcaps_input req = {0};
4698 int rc;
4699
4700 if (bp->hwrm_spec_code < 0x10600)
4701 return 0;
4702
4703 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4704 mutex_lock(&bp->hwrm_cmd_lock);
4705 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4706 if (!rc) {
4707 u32 flags = le32_to_cpu(resp->flags);
4708
4709 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4710 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4711 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4712 if (flags &
4713 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4714 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
4715 }
4716 mutex_unlock(&bp->hwrm_cmd_lock);
4717 return rc;
4718 }
4719
4720 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4721 {
4722 u16 i;
4723 u32 rc = 0;
4724
4725 if (bp->flags & BNXT_FLAG_CHIP_P5)
4726 return 0;
4727
4728 mutex_lock(&bp->hwrm_cmd_lock);
4729 for (i = 0; i < bp->rx_nr_rings; i++) {
4730 struct hwrm_ring_grp_alloc_input req = {0};
4731 struct hwrm_ring_grp_alloc_output *resp =
4732 bp->hwrm_cmd_resp_addr;
4733 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4734
4735 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4736
4737 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4738 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4739 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4740 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4741
4742 rc = _hwrm_send_message(bp, &req, sizeof(req),
4743 HWRM_CMD_TIMEOUT);
4744 if (rc)
4745 break;
4746
4747 bp->grp_info[grp_idx].fw_grp_id =
4748 le32_to_cpu(resp->ring_group_id);
4749 }
4750 mutex_unlock(&bp->hwrm_cmd_lock);
4751 return rc;
4752 }
4753
4754 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4755 {
4756 u16 i;
4757 u32 rc = 0;
4758 struct hwrm_ring_grp_free_input req = {0};
4759
4760 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
4761 return 0;
4762
4763 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4764
4765 mutex_lock(&bp->hwrm_cmd_lock);
4766 for (i = 0; i < bp->cp_nr_rings; i++) {
4767 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4768 continue;
4769 req.ring_group_id =
4770 cpu_to_le32(bp->grp_info[i].fw_grp_id);
4771
4772 rc = _hwrm_send_message(bp, &req, sizeof(req),
4773 HWRM_CMD_TIMEOUT);
4774 if (rc)
4775 break;
4776 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4777 }
4778 mutex_unlock(&bp->hwrm_cmd_lock);
4779 return rc;
4780 }
4781
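/* Build and send a HWRM_RING_ALLOC request for one ring. Depending on
 * the ring type, the request carries the associated completion ring,
 * stats context, NQ, or RX ring, and the returned firmware ring ID is
 * stored in ring->fw_ring_id.
 */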
4782 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4783 struct bnxt_ring_struct *ring,
4784 u32 ring_type, u32 map_index)
4785 {
4786 int rc = 0, err = 0;
4787 struct hwrm_ring_alloc_input req = {0};
4788 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4789 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
4790 struct bnxt_ring_grp_info *grp_info;
4791 u16 ring_id;
4792
4793 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4794
4795 req.enables = 0;
4796 if (rmem->nr_pages > 1) {
4797 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
4798 /* Page size is in log2 units */
4799 req.page_size = BNXT_PAGE_SHIFT;
4800 req.page_tbl_depth = 1;
4801 } else {
4802 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
4803 }
4804 req.fbo = 0;
4805 /* Association of ring index with doorbell index and MSIX number */
4806 req.logical_id = cpu_to_le16(map_index);
4807
4808 switch (ring_type) {
4809 case HWRM_RING_ALLOC_TX: {
4810 struct bnxt_tx_ring_info *txr;
4811
4812 txr = container_of(ring, struct bnxt_tx_ring_info,
4813 tx_ring_struct);
4814 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4815 /* Association of transmit ring with completion ring */
4816 grp_info = &bp->grp_info[ring->grp_idx];
4817 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
4818 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4819 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4820 req.queue_id = cpu_to_le16(ring->queue_id);
4821 break;
4822 }
4823 case HWRM_RING_ALLOC_RX:
4824 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4825 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4826 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4827 u16 flags = 0;
4828
4829 /* Association of rx ring with stats context */
4830 grp_info = &bp->grp_info[ring->grp_idx];
4831 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4832 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4833 req.enables |= cpu_to_le32(
4834 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4835 if (NET_IP_ALIGN == 2)
4836 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4837 req.flags = cpu_to_le16(flags);
4838 }
4839 break;
4840 case HWRM_RING_ALLOC_AGG:
4841 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4842 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4843 /* Association of agg ring with rx ring */
4844 grp_info = &bp->grp_info[ring->grp_idx];
4845 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4846 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4847 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4848 req.enables |= cpu_to_le32(
4849 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4850 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4851 } else {
4852 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4853 }
4854 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4855 break;
4856 case HWRM_RING_ALLOC_CMPL:
4857 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4858 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4859 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4860 /* Association of cp ring with nq */
4861 grp_info = &bp->grp_info[map_index];
4862 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4863 req.cq_handle = cpu_to_le64(ring->handle);
4864 req.enables |= cpu_to_le32(
4865 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4866 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4867 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4868 }
4869 break;
4870 case HWRM_RING_ALLOC_NQ:
4871 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4872 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4873 if (bp->flags & BNXT_FLAG_USING_MSIX)
4874 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4875 break;
4876 default:
4877 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4878 ring_type);
4879 return -1;
4880 }
4881
4882 mutex_lock(&bp->hwrm_cmd_lock);
4883 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4884 err = le16_to_cpu(resp->error_code);
4885 ring_id = le16_to_cpu(resp->ring_id);
4886 mutex_unlock(&bp->hwrm_cmd_lock);
4887
4888 if (rc || err) {
4889 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4890 ring_type, rc, err);
4891 return -EIO;
4892 }
4893 ring->fw_ring_id = ring_id;
4894 return rc;
4895 }
4896
4897 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4898 {
4899 int rc;
4900
4901 if (BNXT_PF(bp)) {
4902 struct hwrm_func_cfg_input req = {0};
4903
4904 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4905 req.fid = cpu_to_le16(0xffff);
4906 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4907 req.async_event_cr = cpu_to_le16(idx);
4908 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4909 } else {
4910 struct hwrm_func_vf_cfg_input req = {0};
4911
4912 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4913 req.enables =
4914 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4915 req.async_event_cr = cpu_to_le16(idx);
4916 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4917 }
4918 return rc;
4919 }
4920
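/* Record the doorbell address and key for a newly allocated ring.
 * P5 chips use 64-bit doorbells at a fixed BAR1 offset (0x10000 for
 * the PF, 0x4000 for a VF) with the ring type and xid encoded in the
 * key; legacy chips use per-MSIX 32-bit doorbells at map_idx * 0x80.
 */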
4921 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4922 u32 map_idx, u32 xid)
4923 {
4924 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4925 if (BNXT_PF(bp))
4926 db->doorbell = bp->bar1 + 0x10000;
4927 else
4928 db->doorbell = bp->bar1 + 0x4000;
4929 switch (ring_type) {
4930 case HWRM_RING_ALLOC_TX:
4931 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4932 break;
4933 case HWRM_RING_ALLOC_RX:
4934 case HWRM_RING_ALLOC_AGG:
4935 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4936 break;
4937 case HWRM_RING_ALLOC_CMPL:
4938 db->db_key64 = DBR_PATH_L2;
4939 break;
4940 case HWRM_RING_ALLOC_NQ:
4941 db->db_key64 = DBR_PATH_L2;
4942 break;
4943 }
4944 db->db_key64 |= (u64)xid << DBR_XID_SFT;
4945 } else {
4946 db->doorbell = bp->bar1 + map_idx * 0x80;
4947 switch (ring_type) {
4948 case HWRM_RING_ALLOC_TX:
4949 db->db_key32 = DB_KEY_TX;
4950 break;
4951 case HWRM_RING_ALLOC_RX:
4952 case HWRM_RING_ALLOC_AGG:
4953 db->db_key32 = DB_KEY_RX;
4954 break;
4955 case HWRM_RING_ALLOC_CMPL:
4956 db->db_key32 = DB_KEY_CP;
4957 break;
4958 }
4959 }
4960 }
4961
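/* Allocate all firmware rings. NQs (P5) or completion rings come
 * first (ring 0 is also set up as the async event ring), followed by
 * TX and RX rings (each with its own completion ring on P5 chips),
 * and finally the aggregation rings when enabled.
 */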
4962 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4963 {
4964 int i, rc = 0;
4965 u32 type;
4966
4967 if (bp->flags & BNXT_FLAG_CHIP_P5)
4968 type = HWRM_RING_ALLOC_NQ;
4969 else
4970 type = HWRM_RING_ALLOC_CMPL;
4971 for (i = 0; i < bp->cp_nr_rings; i++) {
4972 struct bnxt_napi *bnapi = bp->bnapi[i];
4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4975 u32 map_idx = ring->map_idx;
4976
4977 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4978 if (rc)
4979 goto err_out;
4980 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4981 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4982 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4983
4984 if (!i) {
4985 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4986 if (rc)
4987 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4988 }
4989 }
4990
4991 type = HWRM_RING_ALLOC_TX;
4992 for (i = 0; i < bp->tx_nr_rings; i++) {
4993 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4994 struct bnxt_ring_struct *ring;
4995 u32 map_idx;
4996
4997 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4998 struct bnxt_napi *bnapi = txr->bnapi;
4999 struct bnxt_cp_ring_info *cpr, *cpr2;
5000 u32 type2 = HWRM_RING_ALLOC_CMPL;
5001
5002 cpr = &bnapi->cp_ring;
5003 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5004 ring = &cpr2->cp_ring_struct;
5005 ring->handle = BNXT_TX_HDL;
5006 map_idx = bnapi->index;
5007 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5008 if (rc)
5009 goto err_out;
5010 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5011 ring->fw_ring_id);
5012 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5013 }
5014 ring = &txr->tx_ring_struct;
5015 map_idx = i;
5016 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5017 if (rc)
5018 goto err_out;
5019 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5020 }
5021
5022 type = HWRM_RING_ALLOC_RX;
5023 for (i = 0; i < bp->rx_nr_rings; i++) {
5024 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5025 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5026 struct bnxt_napi *bnapi = rxr->bnapi;
5027 u32 map_idx = bnapi->index;
5028
5029 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5030 if (rc)
5031 goto err_out;
5032 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5033 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5034 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5035 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5036 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5037 u32 type2 = HWRM_RING_ALLOC_CMPL;
5038 struct bnxt_cp_ring_info *cpr2;
5039
5040 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5041 ring = &cpr2->cp_ring_struct;
5042 ring->handle = BNXT_RX_HDL;
5043 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5044 if (rc)
5045 goto err_out;
5046 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5047 ring->fw_ring_id);
5048 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5049 }
5050 }
5051
5052 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5053 type = HWRM_RING_ALLOC_AGG;
5054 for (i = 0; i < bp->rx_nr_rings; i++) {
5055 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5056 struct bnxt_ring_struct *ring =
5057 &rxr->rx_agg_ring_struct;
5058 u32 grp_idx = ring->grp_idx;
5059 u32 map_idx = grp_idx + bp->rx_nr_rings;
5060
5061 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5062 if (rc)
5063 goto err_out;
5064
5065 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5066 ring->fw_ring_id);
5067 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5068 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5069 }
5070 }
5071 err_out:
5072 return rc;
5073 }
5074
5075 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5076 struct bnxt_ring_struct *ring,
5077 u32 ring_type, int cmpl_ring_id)
5078 {
5079 int rc;
5080 struct hwrm_ring_free_input req = {0};
5081 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5082 u16 error_code;
5083
5084 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5085 req.ring_type = ring_type;
5086 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5087
5088 mutex_lock(&bp->hwrm_cmd_lock);
5089 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5090 error_code = le16_to_cpu(resp->error_code);
5091 mutex_unlock(&bp->hwrm_cmd_lock);
5092
5093 if (rc || error_code) {
5094 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5095 ring_type, rc, error_code);
5096 return -EIO;
5097 }
5098 return 0;
5099 }
5100
5101 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5102 {
5103 u32 type;
5104 int i;
5105
5106 if (!bp->bnapi)
5107 return;
5108
5109 for (i = 0; i < bp->tx_nr_rings; i++) {
5110 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5111 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5112 u32 cmpl_ring_id;
5113
5114 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5115 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5116 hwrm_ring_free_send_msg(bp, ring,
5117 RING_FREE_REQ_RING_TYPE_TX,
5118 close_path ? cmpl_ring_id :
5119 INVALID_HW_RING_ID);
5120 ring->fw_ring_id = INVALID_HW_RING_ID;
5121 }
5122 }
5123
5124 for (i = 0; i < bp->rx_nr_rings; i++) {
5125 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5126 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5127 u32 grp_idx = rxr->bnapi->index;
5128 u32 cmpl_ring_id;
5129
5130 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5131 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5132 hwrm_ring_free_send_msg(bp, ring,
5133 RING_FREE_REQ_RING_TYPE_RX,
5134 close_path ? cmpl_ring_id :
5135 INVALID_HW_RING_ID);
5136 ring->fw_ring_id = INVALID_HW_RING_ID;
5137 bp->grp_info[grp_idx].rx_fw_ring_id =
5138 INVALID_HW_RING_ID;
5139 }
5140 }
5141
5142 if (bp->flags & BNXT_FLAG_CHIP_P5)
5143 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5144 else
5145 type = RING_FREE_REQ_RING_TYPE_RX;
5146 for (i = 0; i < bp->rx_nr_rings; i++) {
5147 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5148 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5149 u32 grp_idx = rxr->bnapi->index;
5150 u32 cmpl_ring_id;
5151
5152 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5153 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5154 hwrm_ring_free_send_msg(bp, ring, type,
5155 close_path ? cmpl_ring_id :
5156 INVALID_HW_RING_ID);
5157 ring->fw_ring_id = INVALID_HW_RING_ID;
5158 bp->grp_info[grp_idx].agg_fw_ring_id =
5159 INVALID_HW_RING_ID;
5160 }
5161 }
5162
5163 /* The completion rings are about to be freed. After that, the
5164  * IRQ doorbells will no longer work, so disable interrupts
5165  * here.
5166  */
5167 bnxt_disable_int_sync(bp);
5168
5169 if (bp->flags & BNXT_FLAG_CHIP_P5)
5170 type = RING_FREE_REQ_RING_TYPE_NQ;
5171 else
5172 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5173 for (i = 0; i < bp->cp_nr_rings; i++) {
5174 struct bnxt_napi *bnapi = bp->bnapi[i];
5175 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5176 struct bnxt_ring_struct *ring;
5177 int j;
5178
5179 for (j = 0; j < 2; j++) {
5180 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5181
5182 if (cpr2) {
5183 ring = &cpr2->cp_ring_struct;
5184 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5185 continue;
5186 hwrm_ring_free_send_msg(bp, ring,
5187 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5188 INVALID_HW_RING_ID);
5189 ring->fw_ring_id = INVALID_HW_RING_ID;
5190 }
5191 }
5192 ring = &cpr->cp_ring_struct;
5193 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5194 hwrm_ring_free_send_msg(bp, ring, type,
5195 INVALID_HW_RING_ID);
5196 ring->fw_ring_id = INVALID_HW_RING_ID;
5197 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5198 }
5199 }
5200 }
5201
5202 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5203 bool shared);
5204
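/* Query firmware (HWRM_FUNC_QCFG) for the resources currently reserved
 * for this function and cache them in bp->hw_resc. On P5 chips, the
 * reserved RX/TX rings are trimmed if they exceed the reserved
 * completion rings.
 */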
5205 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5206 {
5207 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5208 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5209 struct hwrm_func_qcfg_input req = {0};
5210 int rc;
5211
5212 if (bp->hwrm_spec_code < 0x10601)
5213 return 0;
5214
5215 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5216 req.fid = cpu_to_le16(0xffff);
5217 mutex_lock(&bp->hwrm_cmd_lock);
5218 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5219 if (rc) {
5220 mutex_unlock(&bp->hwrm_cmd_lock);
5221 return -EIO;
5222 }
5223
5224 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5225 if (BNXT_NEW_RM(bp)) {
5226 u16 cp, stats;
5227
5228 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5229 hw_resc->resv_hw_ring_grps =
5230 le32_to_cpu(resp->alloc_hw_ring_grps);
5231 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5232 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5233 stats = le16_to_cpu(resp->alloc_stat_ctx);
5234 hw_resc->resv_irqs = cp;
5235 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5236 int rx = hw_resc->resv_rx_rings;
5237 int tx = hw_resc->resv_tx_rings;
5238
5239 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5240 rx >>= 1;
5241 if (cp < (rx + tx)) {
5242 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5243 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5244 rx <<= 1;
5245 hw_resc->resv_rx_rings = rx;
5246 hw_resc->resv_tx_rings = tx;
5247 }
5248 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5249 hw_resc->resv_hw_ring_grps = rx;
5250 }
5251 hw_resc->resv_cp_rings = cp;
5252 hw_resc->resv_stat_ctxs = stats;
5253 }
5254 mutex_unlock(&bp->hwrm_cmd_lock);
5255 return 0;
5256 }
5257
5258 /* Caller must hold bp->hwrm_cmd_lock */
5259 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5260 {
5261 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5262 struct hwrm_func_qcfg_input req = {0};
5263 int rc;
5264
5265 if (bp->hwrm_spec_code < 0x10601)
5266 return 0;
5267
5268 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5269 req.fid = cpu_to_le16(fid);
5270 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5271 if (!rc)
5272 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5273
5274 return rc;
5275 }
5276
5277 static bool bnxt_rfs_supported(struct bnxt *bp);
5278
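/* Fill a HWRM_FUNC_CFG request with the PF resource counts to reserve.
 * Enable bits are set only for non-zero counts so that unspecified
 * resources are left untouched. On P5 chips, completion rings are
 * reserved as tx_rings + ring_grps and MSI-X vectors as cp_rings.
 */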
5279 static void
5280 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5281 int tx_rings, int rx_rings, int ring_grps,
5282 int cp_rings, int stats, int vnics)
5283 {
5284 u32 enables = 0;
5285
5286 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5287 req->fid = cpu_to_le16(0xffff);
5288 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5289 req->num_tx_rings = cpu_to_le16(tx_rings);
5290 if (BNXT_NEW_RM(bp)) {
5291 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5292 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5293 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5294 enables |= tx_rings + ring_grps ?
5295 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5296 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5297 enables |= rx_rings ?
5298 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5299 } else {
5300 enables |= cp_rings ?
5301 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5302 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5303 enables |= ring_grps ?
5304 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5305 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5306 }
5307 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5308
5309 req->num_rx_rings = cpu_to_le16(rx_rings);
5310 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5311 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5312 req->num_msix = cpu_to_le16(cp_rings);
5313 req->num_rsscos_ctxs =
5314 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5315 } else {
5316 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5317 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5318 req->num_rsscos_ctxs = cpu_to_le16(1);
5319 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5320 bnxt_rfs_supported(bp))
5321 req->num_rsscos_ctxs =
5322 cpu_to_le16(ring_grps + 1);
5323 }
5324 req->num_stat_ctxs = cpu_to_le16(stats);
5325 req->num_vnics = cpu_to_le16(vnics);
5326 }
5327 req->enables = cpu_to_le32(enables);
5328 }
5329
5330 static void
5331 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5332 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5333 int rx_rings, int ring_grps, int cp_rings,
5334 int stats, int vnics)
5335 {
5336 u32 enables = 0;
5337
5338 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5339 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5340 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5341 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5342 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5343 enables |= tx_rings + ring_grps ?
5344 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5345 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5346 } else {
5347 enables |= cp_rings ?
5348 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5349 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5350 enables |= ring_grps ?
5351 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5352 }
5353 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5354 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5355
5356 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5357 req->num_tx_rings = cpu_to_le16(tx_rings);
5358 req->num_rx_rings = cpu_to_le16(rx_rings);
5359 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5360 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5361 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5362 } else {
5363 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5364 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5365 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5366 }
5367 req->num_stat_ctxs = cpu_to_le16(stats);
5368 req->num_vnics = cpu_to_le16(vnics);
5369
5370 req->enables = cpu_to_le32(enables);
5371 }
5372
5373 static int
5374 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5375 int ring_grps, int cp_rings, int stats, int vnics)
5376 {
5377 struct hwrm_func_cfg_input req = {0};
5378 int rc;
5379
5380 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5381 cp_rings, stats, vnics);
5382 if (!req.enables)
5383 return 0;
5384
5385 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5386 if (rc)
5387 return -ENOMEM;
5388
5389 if (bp->hwrm_spec_code < 0x10601)
5390 bp->hw_resc.resv_tx_rings = tx_rings;
5391
5392 rc = bnxt_hwrm_get_rings(bp);
5393 return rc;
5394 }
5395
5396 static int
5397 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5398 int ring_grps, int cp_rings, int stats, int vnics)
5399 {
5400 struct hwrm_func_vf_cfg_input req = {0};
5401 int rc;
5402
5403 if (!BNXT_NEW_RM(bp)) {
5404 bp->hw_resc.resv_tx_rings = tx_rings;
5405 return 0;
5406 }
5407
5408 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5409 cp_rings, stats, vnics);
5410 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5411 if (rc)
5412 return -ENOMEM;
5413
5414 rc = bnxt_hwrm_get_rings(bp);
5415 return rc;
5416 }
5417
5418 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5419 int cp, int stat, int vnic)
5420 {
5421 if (BNXT_PF(bp))
5422 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5423 vnic);
5424 else
5425 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5426 vnic);
5427 }
5428
5429 int bnxt_nq_rings_in_use(struct bnxt *bp)
5430 {
5431 int cp = bp->cp_nr_rings;
5432 int ulp_msix, ulp_base;
5433
5434 ulp_msix = bnxt_get_ulp_msix_num(bp);
5435 if (ulp_msix) {
5436 ulp_base = bnxt_get_ulp_msix_base(bp);
5437 cp += ulp_msix;
5438 if ((ulp_base + ulp_msix) > cp)
5439 cp = ulp_base + ulp_msix;
5440 }
5441 return cp;
5442 }
5443
5444 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5445 {
5446 int cp;
5447
5448 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5449 return bnxt_nq_rings_in_use(bp);
5450
5451 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5452 return cp;
5453 }
5454
5455 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5456 {
5457 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5458 }
5459
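/* Return true if the resources currently reserved in firmware no
 * longer match what the driver needs (TX/RX rings, completion
 * rings/IRQs, ring groups, VNICs, stat contexts), in which case
 * __bnxt_reserve_rings() must renegotiate with firmware.
 */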
5460 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5461 {
5462 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5463 int cp = bnxt_cp_rings_in_use(bp);
5464 int nq = bnxt_nq_rings_in_use(bp);
5465 int rx = bp->rx_nr_rings, stat;
5466 int vnic = 1, grp = rx;
5467
5468 if (bp->hwrm_spec_code < 0x10601)
5469 return false;
5470
5471 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5472 return true;
5473
5474 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5475 vnic = rx + 1;
5476 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5477 rx <<= 1;
5478 stat = bnxt_get_func_stat_ctxs(bp);
5479 if (BNXT_NEW_RM(bp) &&
5480 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5481 hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
5482 hw_resc->resv_stat_ctxs != stat ||
5483 (hw_resc->resv_hw_ring_grps != grp &&
5484 !(bp->flags & BNXT_FLAG_CHIP_P5))))
5485 return true;
5486 return false;
5487 }
5488
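/* Reserve the required resources with firmware, then shrink the
 * driver's ring counts to what was actually granted. If there are
 * not enough RX rings for the 2:1 aggregation split, aggregation
 * rings (and LRO) are disabled instead.
 */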
5489 static int __bnxt_reserve_rings(struct bnxt *bp)
5490 {
5491 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5492 int cp = bnxt_nq_rings_in_use(bp);
5493 int tx = bp->tx_nr_rings;
5494 int rx = bp->rx_nr_rings;
5495 int grp, rx_rings, rc;
5496 int vnic = 1, stat;
5497 bool sh = false;
5498
5499 if (!bnxt_need_reserve_rings(bp))
5500 return 0;
5501
5502 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5503 sh = true;
5504 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5505 vnic = rx + 1;
5506 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5507 rx <<= 1;
5508 grp = bp->rx_nr_rings;
5509 stat = bnxt_get_func_stat_ctxs(bp);
5510
5511 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5512 if (rc)
5513 return rc;
5514
5515 tx = hw_resc->resv_tx_rings;
5516 if (BNXT_NEW_RM(bp)) {
5517 rx = hw_resc->resv_rx_rings;
5518 cp = hw_resc->resv_irqs;
5519 grp = hw_resc->resv_hw_ring_grps;
5520 vnic = hw_resc->resv_vnics;
5521 stat = hw_resc->resv_stat_ctxs;
5522 }
5523
5524 rx_rings = rx;
5525 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5526 if (rx >= 2) {
5527 rx_rings = rx >> 1;
5528 } else {
5529 if (netif_running(bp->dev))
5530 return -ENOMEM;
5531
5532 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5533 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5534 bp->dev->hw_features &= ~NETIF_F_LRO;
5535 bp->dev->features &= ~NETIF_F_LRO;
5536 bnxt_set_ring_params(bp);
5537 }
5538 }
5539 rx_rings = min_t(int, rx_rings, grp);
5540 cp = min_t(int, cp, bp->cp_nr_rings);
5541 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5542 stat -= bnxt_get_ulp_stat_ctxs(bp);
5543 cp = min_t(int, cp, stat);
5544 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5545 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5546 rx = rx_rings << 1;
5547 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5548 bp->tx_nr_rings = tx;
5549 bp->rx_nr_rings = rx_rings;
5550 bp->cp_nr_rings = cp;
5551
5552 if (!tx || !rx || !cp || !grp || !vnic || !stat)
5553 return -ENOMEM;
5554
5555 return rc;
5556 }
5557
5558 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5559 int ring_grps, int cp_rings, int stats,
5560 int vnics)
5561 {
5562 struct hwrm_func_vf_cfg_input req = {0};
5563 u32 flags;
5564 int rc;
5565
5566 if (!BNXT_NEW_RM(bp))
5567 return 0;
5568
5569 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5570 cp_rings, stats, vnics);
5571 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5572 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5573 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5574 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5575 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5576 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5577 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5578 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5579
5580 req.flags = cpu_to_le32(flags);
5581 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5582 if (rc)
5583 return -ENOMEM;
5584 return 0;
5585 }
5586
5587 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5588 int ring_grps, int cp_rings, int stats,
5589 int vnics)
5590 {
5591 struct hwrm_func_cfg_input req = {0};
5592 u32 flags;
5593 int rc;
5594
5595 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5596 cp_rings, stats, vnics);
5597 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
5598 if (BNXT_NEW_RM(bp)) {
5599 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5600 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5603 if (bp->flags & BNXT_FLAG_CHIP_P5)
5604 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5605 else
5606 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5607 }
5608
5609 req.flags = cpu_to_le32(flags);
5610 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5611 if (rc)
5612 return -ENOMEM;
5613 return 0;
5614 }
5615
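/* "Dry run" resource check: send FUNC_CFG/FUNC_VF_CFG with the
 * *_ASSETS_TEST flags so that firmware only verifies whether the
 * requested counts could be satisfied, without changing the current
 * reservation.
 */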
5616 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5617 int ring_grps, int cp_rings, int stats,
5618 int vnics)
5619 {
5620 if (bp->hwrm_spec_code < 0x10801)
5621 return 0;
5622
5623 if (BNXT_PF(bp))
5624 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
5625 ring_grps, cp_rings, stats,
5626 vnics);
5627
5628 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
5629 cp_rings, stats, vnics);
5630 }
5631
5632 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5633 {
5634 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5635 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5636 struct hwrm_ring_aggint_qcaps_input req = {0};
5637 int rc;
5638
5639 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5640 coal_cap->num_cmpl_dma_aggr_max = 63;
5641 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5642 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5643 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5644 coal_cap->int_lat_tmr_min_max = 65535;
5645 coal_cap->int_lat_tmr_max_max = 65535;
5646 coal_cap->num_cmpl_aggr_int_max = 65535;
5647 coal_cap->timer_units = 80;
5648
5649 if (bp->hwrm_spec_code < 0x10902)
5650 return;
5651
5652 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5653 mutex_lock(&bp->hwrm_cmd_lock);
5654 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5655 if (!rc) {
5656 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
5657 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
5658 coal_cap->num_cmpl_dma_aggr_max =
5659 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5660 coal_cap->num_cmpl_dma_aggr_during_int_max =
5661 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5662 coal_cap->cmpl_aggr_dma_tmr_max =
5663 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5664 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5665 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5666 coal_cap->int_lat_tmr_min_max =
5667 le16_to_cpu(resp->int_lat_tmr_min_max);
5668 coal_cap->int_lat_tmr_max_max =
5669 le16_to_cpu(resp->int_lat_tmr_max_max);
5670 coal_cap->num_cmpl_aggr_int_max =
5671 le16_to_cpu(resp->num_cmpl_aggr_int_max);
5672 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5673 }
5674 mutex_unlock(&bp->hwrm_cmd_lock);
5675 }
5676
5677 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5678 {
5679 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5680
5681 return usec * 1000 / coal_cap->timer_units;
5682 }
5683
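/* Translate the driver's generic coalescing settings (struct
 * bnxt_coal) into a RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping
 * each value to the limits reported by HWRM_RING_AGGINT_QCAPS. The
 * min timer is set to 1/2 and the DMA buffer timer to 1/4 of the
 * interrupt timer.
 */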
5684 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5685 struct bnxt_coal *hw_coal,
5686 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5687 {
5688 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5689 u32 cmpl_params = coal_cap->cmpl_params;
5690 u16 val, tmr, max, flags = 0;
5691
5692 max = hw_coal->bufs_per_record * 128;
5693 if (hw_coal->budget)
5694 max = hw_coal->bufs_per_record * hw_coal->budget;
5695 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
5696
5697 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5698 req->num_cmpl_aggr_int = cpu_to_le16(val);
5699
5700 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
5701 req->num_cmpl_dma_aggr = cpu_to_le16(val);
5702
5703 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5704 coal_cap->num_cmpl_dma_aggr_during_int_max);
5705 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5706
5707 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5708 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
5709 req->int_lat_tmr_max = cpu_to_le16(tmr);
5710
5711 /* min timer set to 1/2 of interrupt timer */
5712 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5713 val = tmr / 2;
5714 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5715 req->int_lat_tmr_min = cpu_to_le16(val);
5716 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5717 }
5718
5719 /* buf timer set to 1/4 of interrupt timer */
5720 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
5721 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5722
5723 if (cmpl_params &
5724 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5725 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5726 val = clamp_t(u16, tmr, 1,
5727 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
5728 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5729 req->enables |=
5730 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5731 }
5732
5733 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5734 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5735 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5736 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
5737 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
5738 req->flags = cpu_to_le16(flags);
5739 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
5740 }
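/* Illustrative example: the secondary timers above are derived from the
 * main interrupt latency timer and clamped to the RING_AGGINT_QCAPS
 * limits: int_lat_tmr_min = tmr / 2, cmpl_aggr_dma_tmr = tmr / 4.
 * With coal_ticks = 12 usec and timer_units = 80, tmr = 150 ticks,
 * giving a min timer of 75 and a buf (DMA) timer of 37.
 */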
5741
5742 /* Caller holds bp->hwrm_cmd_lock */
5743 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5744 struct bnxt_coal *hw_coal)
5745 {
5746 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5747 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5748 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5749 u32 nq_params = coal_cap->nq_params;
5750 u16 tmr;
5751
5752 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5753 return 0;
5754
5755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5756 -1, -1);
5757 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5758 req.flags =
5759 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5760
5761 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5762 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5763 req.int_lat_tmr_min = cpu_to_le16(tmr);
5764 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5765 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5766 }
5767
5768 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5769 {
5770 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5771 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5772 struct bnxt_coal coal;
5773
5774 /* Tick values in microseconds.
5775 * 1 coal_buf x bufs_per_record = 1 completion record.
5776 */
5777 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5778
5779 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5780 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5781
5782 if (!bnapi->rx_ring)
5783 return -ENODEV;
5784
5785 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5786 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5787
5788 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
5789
5790 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
5791
5792 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5793 HWRM_CMD_TIMEOUT);
5794 }
5795
5796 int bnxt_hwrm_set_coal(struct bnxt *bp)
5797 {
5798 int i, rc = 0;
5799 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5800 req_tx = {0}, *req;
5801
5802 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5803 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5804 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5805 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5806
5807 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5808 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
5809
5810 mutex_lock(&bp->hwrm_cmd_lock);
5811 for (i = 0; i < bp->cp_nr_rings; i++) {
5812 struct bnxt_napi *bnapi = bp->bnapi[i];
5813 struct bnxt_coal *hw_coal;
5814 u16 ring_id;
5815
5816 req = &req_rx;
5817 if (!bnapi->rx_ring) {
5818 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5819 req = &req_tx;
5820 } else {
5821 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5822 }
5823 req->ring_id = cpu_to_le16(ring_id);
5824
5825 rc = _hwrm_send_message(bp, req, sizeof(*req),
5826 HWRM_CMD_TIMEOUT);
5827 if (rc)
5828 break;
5829
5830 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5831 continue;
5832
5833 if (bnapi->rx_ring && bnapi->tx_ring) {
5834 req = &req_tx;
5835 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5836 req->ring_id = cpu_to_le16(ring_id);
5837 rc = _hwrm_send_message(bp, req, sizeof(*req),
5838 HWRM_CMD_TIMEOUT);
5839 if (rc)
5840 break;
5841 }
5842 if (bnapi->rx_ring)
5843 hw_coal = &bp->rx_coal;
5844 else
5845 hw_coal = &bp->tx_coal;
5846 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
5847 }
5848 mutex_unlock(&bp->hwrm_cmd_lock);
5849 return rc;
5850 }
5851
5852 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5853 {
5854 int rc = 0, i;
5855 struct hwrm_stat_ctx_free_input req = {0};
5856
5857 if (!bp->bnapi)
5858 return 0;
5859
5860 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5861 return 0;
5862
5863 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5864
5865 mutex_lock(&bp->hwrm_cmd_lock);
5866 for (i = 0; i < bp->cp_nr_rings; i++) {
5867 struct bnxt_napi *bnapi = bp->bnapi[i];
5868 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5869
5870 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5871 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5872
5873 rc = _hwrm_send_message(bp, &req, sizeof(req),
5874 HWRM_CMD_TIMEOUT);
5875 if (rc)
5876 break;
5877
5878 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5879 }
5880 }
5881 mutex_unlock(&bp->hwrm_cmd_lock);
5882 return rc;
5883 }
5884
5885 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5886 {
5887 int rc = 0, i;
5888 struct hwrm_stat_ctx_alloc_input req = {0};
5889 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5890
5891 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5892 return 0;
5893
5894 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5895
5896 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
5897
5898 mutex_lock(&bp->hwrm_cmd_lock);
5899 for (i = 0; i < bp->cp_nr_rings; i++) {
5900 struct bnxt_napi *bnapi = bp->bnapi[i];
5901 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5902
5903 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5904
5905 rc = _hwrm_send_message(bp, &req, sizeof(req),
5906 HWRM_CMD_TIMEOUT);
5907 if (rc)
5908 break;
5909
5910 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5911
5912 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5913 }
5914 mutex_unlock(&bp->hwrm_cmd_lock);
5915 return rc;
5916 }
5917
5918 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5919 {
5920 struct hwrm_func_qcfg_input req = {0};
5921 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5922 u16 flags;
5923 int rc;
5924
5925 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5926 req.fid = cpu_to_le16(0xffff);
5927 mutex_lock(&bp->hwrm_cmd_lock);
5928 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5929 if (rc)
5930 goto func_qcfg_exit;
5931
5932 #ifdef CONFIG_BNXT_SRIOV
5933 if (BNXT_VF(bp)) {
5934 struct bnxt_vf_info *vf = &bp->vf;
5935
5936 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5937 }
5938 #endif
5939 flags = le16_to_cpu(resp->flags);
5940 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5941 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
5942 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
5943 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
5944 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
5945 }
5946 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5947 bp->flags |= BNXT_FLAG_MULTI_HOST;
5948
5949 switch (resp->port_partition_type) {
5950 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5951 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5952 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5953 bp->port_partition_type = resp->port_partition_type;
5954 break;
5955 }
5956 if (bp->hwrm_spec_code < 0x10707 ||
5957 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5958 bp->br_mode = BRIDGE_MODE_VEB;
5959 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5960 bp->br_mode = BRIDGE_MODE_VEPA;
5961 else
5962 bp->br_mode = BRIDGE_MODE_UNDEF;
5963
5964 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5965 if (!bp->max_mtu)
5966 bp->max_mtu = BNXT_MAX_MTU;
5967
5968 func_qcfg_exit:
5969 mutex_unlock(&bp->hwrm_cmd_lock);
5970 return rc;
5971 }
5972
5973 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5974 {
5975 struct hwrm_func_backing_store_qcaps_input req = {0};
5976 struct hwrm_func_backing_store_qcaps_output *resp =
5977 bp->hwrm_cmd_resp_addr;
5978 int rc;
5979
5980 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5981 return 0;
5982
5983 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
5984 mutex_lock(&bp->hwrm_cmd_lock);
5985 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5986 if (!rc) {
5987 struct bnxt_ctx_pg_info *ctx_pg;
5988 struct bnxt_ctx_mem_info *ctx;
5989 int i;
5990
5991 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
5992 if (!ctx) {
5993 rc = -ENOMEM;
5994 goto ctx_err;
5995 }
5996 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
5997 if (!ctx_pg) {
5998 kfree(ctx);
5999 rc = -ENOMEM;
6000 goto ctx_err;
6001 }
6002 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6003 ctx->tqm_mem[i] = ctx_pg;
6004
6005 bp->ctx = ctx;
6006 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6007 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6008 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6009 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6010 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6011 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6012 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6013 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6014 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6015 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6016 ctx->vnic_max_vnic_entries =
6017 le16_to_cpu(resp->vnic_max_vnic_entries);
6018 ctx->vnic_max_ring_table_entries =
6019 le16_to_cpu(resp->vnic_max_ring_table_entries);
6020 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6021 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6022 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6023 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6024 ctx->tqm_min_entries_per_ring =
6025 le32_to_cpu(resp->tqm_min_entries_per_ring);
6026 ctx->tqm_max_entries_per_ring =
6027 le32_to_cpu(resp->tqm_max_entries_per_ring);
6028 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6029 if (!ctx->tqm_entries_multiple)
6030 ctx->tqm_entries_multiple = 1;
6031 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6032 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6033 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6034 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6035 } else {
6036 rc = 0;
6037 }
6038 ctx_err:
6039 mutex_unlock(&bp->hwrm_cmd_lock);
6040 return rc;
6041 }
6042
6043 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6044 __le64 *pg_dir)
6045 {
6046 u8 pg_size = 0;
6047
6048 if (BNXT_PAGE_SHIFT == 13)
6049 pg_size = 1 << 4;
6050 else if (BNXT_PAGE_SIZE == 16)
6051 pg_size = 2 << 4;
6052
6053 *pg_attr = pg_size;
6054 if (rmem->depth >= 1) {
6055 if (rmem->depth == 2)
6056 *pg_attr |= 2;
6057 else
6058 *pg_attr |= 1;
6059 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6060 } else {
6061 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6062 }
6063 }
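/* Illustrative note: the attribute byte packs the backing-store page
 * size into the upper nibble (0 for the default 4K page, 1 << 4 for
 * 8K, 2 << 4 presumably selecting 64K per the HSI encoding) and the
 * page-table depth into the low bits: 0 addresses a single data page
 * directly, 1 and 2 add one or two levels of indirection, with pg_dir
 * pointing at the page table (or the lone data page).
 */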
6064
6065 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6066 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6067 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6068 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6069 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6070 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6071
6072 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6073 {
6074 struct hwrm_func_backing_store_cfg_input req = {0};
6075 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6076 struct bnxt_ctx_pg_info *ctx_pg;
6077 __le32 *num_entries;
6078 __le64 *pg_dir;
6079 u8 *pg_attr;
6080 int i, rc;
6081 u32 ena;
6082
6083 if (!ctx)
6084 return 0;
6085
6086 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6087 req.enables = cpu_to_le32(enables);
6088
6089 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6090 ctx_pg = &ctx->qp_mem;
6091 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6092 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6093 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6094 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6095 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6096 &req.qpc_pg_size_qpc_lvl,
6097 &req.qpc_page_dir);
6098 }
6099 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6100 ctx_pg = &ctx->srq_mem;
6101 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6102 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6103 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6104 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6105 &req.srq_pg_size_srq_lvl,
6106 &req.srq_page_dir);
6107 }
6108 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6109 ctx_pg = &ctx->cq_mem;
6110 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6111 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6112 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6113 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6114 &req.cq_page_dir);
6115 }
6116 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6117 ctx_pg = &ctx->vnic_mem;
6118 req.vnic_num_vnic_entries =
6119 cpu_to_le16(ctx->vnic_max_vnic_entries);
6120 req.vnic_num_ring_table_entries =
6121 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6122 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6123 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6124 &req.vnic_pg_size_vnic_lvl,
6125 &req.vnic_page_dir);
6126 }
6127 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6128 ctx_pg = &ctx->stat_mem;
6129 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6130 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6131 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6132 &req.stat_pg_size_stat_lvl,
6133 &req.stat_page_dir);
6134 }
6135 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6136 ctx_pg = &ctx->mrav_mem;
6137 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6138 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6139 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6140 &req.mrav_pg_size_mrav_lvl,
6141 &req.mrav_page_dir);
6142 }
6143 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6144 ctx_pg = &ctx->tim_mem;
6145 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6146 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6147 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6148 &req.tim_pg_size_tim_lvl,
6149 &req.tim_page_dir);
6150 }
6151 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6152 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6153 pg_dir = &req.tqm_sp_page_dir,
6154 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6155 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6156 if (!(enables & ena))
6157 continue;
6158
6159 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6160 ctx_pg = ctx->tqm_mem[i];
6161 *num_entries = cpu_to_le32(ctx_pg->entries);
6162 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6163 }
6164 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6165 if (rc)
6166 rc = -EIO;
6167 return rc;
6168 }
6169
6170 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6171 struct bnxt_ctx_pg_info *ctx_pg)
6172 {
6173 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6174
6175 rmem->page_size = BNXT_PAGE_SIZE;
6176 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6177 rmem->dma_arr = ctx_pg->ctx_dma_arr;
6178 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6179 if (rmem->depth >= 1)
6180 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6181 return bnxt_alloc_ring(bp, rmem);
6182 }
6183
6184 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6185 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6186 u8 depth)
6187 {
6188 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6189 int rc;
6190
6191 if (!mem_size)
6192 return 0;
6193
6194 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6195 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6196 ctx_pg->nr_pages = 0;
6197 return -EINVAL;
6198 }
6199 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6200 int nr_tbls, i;
6201
6202 rmem->depth = 2;
6203 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6204 GFP_KERNEL);
6205 if (!ctx_pg->ctx_pg_tbl)
6206 return -ENOMEM;
6207 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6208 rmem->nr_pages = nr_tbls;
6209 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6210 if (rc)
6211 return rc;
6212 for (i = 0; i < nr_tbls; i++) {
6213 struct bnxt_ctx_pg_info *pg_tbl;
6214
6215 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6216 if (!pg_tbl)
6217 return -ENOMEM;
6218 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6219 rmem = &pg_tbl->ring_mem;
6220 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6221 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6222 rmem->depth = 1;
6223 rmem->nr_pages = MAX_CTX_PAGES;
6224 if (i == (nr_tbls - 1) &&
6225 (ctx_pg->nr_pages % MAX_CTX_PAGES))
6226 rmem->nr_pages = ctx_pg->nr_pages % MAX_CTX_PAGES;
6227 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6228 if (rc)
6229 break;
6230 }
6231 } else {
6232 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6233 if (rmem->nr_pages > 1 || depth)
6234 rmem->depth = 1;
6235 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6236 }
6237 return rc;
6238 }
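/* Illustrative note: regions of at most MAX_CTX_PAGES pages (and depth
 * <= 1) are allocated directly, using one level of indirection only
 * when more than one page is needed.  Larger regions use a two-level
 * layout: a root table whose entries each reference a bnxt_ctx_pg_info
 * block covering up to MAX_CTX_PAGES data pages, with the final block
 * holding any remainder.
 */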
6239
6240 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6241 struct bnxt_ctx_pg_info *ctx_pg)
6242 {
6243 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6244
6245 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6246 ctx_pg->ctx_pg_tbl) {
6247 int i, nr_tbls = rmem->nr_pages;
6248
6249 for (i = 0; i < nr_tbls; i++) {
6250 struct bnxt_ctx_pg_info *pg_tbl;
6251 struct bnxt_ring_mem_info *rmem2;
6252
6253 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6254 if (!pg_tbl)
6255 continue;
6256 rmem2 = &pg_tbl->ring_mem;
6257 bnxt_free_ring(bp, rmem2);
6258 ctx_pg->ctx_pg_arr[i] = NULL;
6259 kfree(pg_tbl);
6260 ctx_pg->ctx_pg_tbl[i] = NULL;
6261 }
6262 kfree(ctx_pg->ctx_pg_tbl);
6263 ctx_pg->ctx_pg_tbl = NULL;
6264 }
6265 bnxt_free_ring(bp, rmem);
6266 ctx_pg->nr_pages = 0;
6267 }
6268
6269 static void bnxt_free_ctx_mem(struct bnxt *bp)
6270 {
6271 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6272 int i;
6273
6274 if (!ctx)
6275 return;
6276
6277 if (ctx->tqm_mem[0]) {
6278 for (i = 0; i < bp->max_q + 1; i++)
6279 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6280 kfree(ctx->tqm_mem[0]);
6281 ctx->tqm_mem[0] = NULL;
6282 }
6283
6284 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6285 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6286 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6287 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6288 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6289 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6290 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6291 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6292 }
6293
6294 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6295 {
6296 struct bnxt_ctx_pg_info *ctx_pg;
6297 struct bnxt_ctx_mem_info *ctx;
6298 u32 mem_size, ena, entries;
6299 u32 extra_srqs = 0;
6300 u32 extra_qps = 0;
6301 u8 pg_lvl = 1;
6302 int i, rc;
6303
6304 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6305 if (rc) {
6306 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6307 rc);
6308 return rc;
6309 }
6310 ctx = bp->ctx;
6311 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6312 return 0;
6313
6314 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6315 pg_lvl = 2;
6316 extra_qps = 65536;
6317 extra_srqs = 8192;
6318 }
6319
6320 ctx_pg = &ctx->qp_mem;
6321 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6322 extra_qps;
6323 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6324 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6325 if (rc)
6326 return rc;
6327
6328 ctx_pg = &ctx->srq_mem;
6329 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6330 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6331 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6332 if (rc)
6333 return rc;
6334
6335 ctx_pg = &ctx->cq_mem;
6336 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6337 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6338 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6339 if (rc)
6340 return rc;
6341
6342 ctx_pg = &ctx->vnic_mem;
6343 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6344 ctx->vnic_max_ring_table_entries;
6345 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6346 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6347 if (rc)
6348 return rc;
6349
6350 ctx_pg = &ctx->stat_mem;
6351 ctx_pg->entries = ctx->stat_max_entries;
6352 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6353 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6354 if (rc)
6355 return rc;
6356
6357 ena = 0;
6358 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6359 goto skip_rdma;
6360
6361 ctx_pg = &ctx->mrav_mem;
6362 ctx_pg->entries = extra_qps * 4;
6363 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6364 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6365 if (rc)
6366 return rc;
6367 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6368
6369 ctx_pg = &ctx->tim_mem;
6370 ctx_pg->entries = ctx->qp_mem.entries;
6371 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6372 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6373 if (rc)
6374 return rc;
6375 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6376
6377 skip_rdma:
6378 entries = ctx->qp_max_l2_entries + extra_qps;
6379 entries = roundup(entries, ctx->tqm_entries_multiple);
6380 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6381 ctx->tqm_max_entries_per_ring);
6382 for (i = 0; i < bp->max_q + 1; i++) {
6383 ctx_pg = ctx->tqm_mem[i];
6384 ctx_pg->entries = entries;
6385 mem_size = ctx->tqm_entry_size * entries;
6386 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6387 if (rc)
6388 return rc;
6389 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6390 }
6391 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6392 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6393 if (rc)
6394 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6395 rc);
6396 else
6397 ctx->flags |= BNXT_CTX_FLAG_INITED;
6398
6399 return 0;
6400 }
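/* Illustrative note: each TQM ring above is sized to qp_max_l2_entries
 * plus the extra RoCE QPs, rounded up to tqm_entries_multiple and
 * clamped to the per-ring min/max reported by firmware, and the
 * matching TQM_SP << i enable bit is set for every ring allocated.
 */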
6401
6402 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6403 {
6404 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6405 struct hwrm_func_resource_qcaps_input req = {0};
6406 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6407 int rc;
6408
6409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6410 req.fid = cpu_to_le16(0xffff);
6411
6412 mutex_lock(&bp->hwrm_cmd_lock);
6413 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6414 HWRM_CMD_TIMEOUT);
6415 if (rc) {
6416 rc = -EIO;
6417 goto hwrm_func_resc_qcaps_exit;
6418 }
6419
6420 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6421 if (!all)
6422 goto hwrm_func_resc_qcaps_exit;
6423
6424 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6425 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6426 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6427 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6428 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6429 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6430 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6431 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6432 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6433 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6434 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6435 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6436 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6437 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6438 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6439 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6440
6441 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6442 u16 max_msix = le16_to_cpu(resp->max_msix);
6443
6444 hw_resc->max_nqs = max_msix;
6445 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6446 }
6447
6448 if (BNXT_PF(bp)) {
6449 struct bnxt_pf_info *pf = &bp->pf;
6450
6451 pf->vf_resv_strategy =
6452 le16_to_cpu(resp->vf_reservation_strategy);
6453 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6454 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6455 }
6456 hwrm_func_resc_qcaps_exit:
6457 mutex_unlock(&bp->hwrm_cmd_lock);
6458 return rc;
6459 }
6460
6461 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6462 {
6463 int rc = 0;
6464 struct hwrm_func_qcaps_input req = {0};
6465 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6466 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6467 u32 flags;
6468
6469 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6470 req.fid = cpu_to_le16(0xffff);
6471
6472 mutex_lock(&bp->hwrm_cmd_lock);
6473 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6474 if (rc)
6475 goto hwrm_func_qcaps_exit;
6476
6477 flags = le32_to_cpu(resp->flags);
6478 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6479 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6480 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6481 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6482
6483 bp->tx_push_thresh = 0;
6484 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6485 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6486
6487 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6488 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6489 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6490 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6491 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6492 if (!hw_resc->max_hw_ring_grps)
6493 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6494 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6495 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6496 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6497
6498 if (BNXT_PF(bp)) {
6499 struct bnxt_pf_info *pf = &bp->pf;
6500
6501 pf->fw_fid = le16_to_cpu(resp->fid);
6502 pf->port_id = le16_to_cpu(resp->port_id);
6503 bp->dev->dev_port = pf->port_id;
6504 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6505 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6506 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6507 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6508 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6509 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6510 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6511 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6512 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6513 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6514 bp->flags |= BNXT_FLAG_WOL_CAP;
6515 } else {
6516 #ifdef CONFIG_BNXT_SRIOV
6517 struct bnxt_vf_info *vf = &bp->vf;
6518
6519 vf->fw_fid = le16_to_cpu(resp->fid);
6520 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6521 #endif
6522 }
6523
6524 hwrm_func_qcaps_exit:
6525 mutex_unlock(&bp->hwrm_cmd_lock);
6526 return rc;
6527 }
6528
6529 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6530
6531 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6532 {
6533 int rc;
6534
6535 rc = __bnxt_hwrm_func_qcaps(bp);
6536 if (rc)
6537 return rc;
6538 rc = bnxt_hwrm_queue_qportcfg(bp);
6539 if (rc) {
6540 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6541 return rc;
6542 }
6543 if (bp->hwrm_spec_code >= 0x10803) {
6544 rc = bnxt_alloc_ctx_mem(bp);
6545 if (rc)
6546 return rc;
6547 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6548 if (!rc)
6549 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
6550 }
6551 return 0;
6552 }
6553
6554 static int bnxt_hwrm_func_reset(struct bnxt *bp)
6555 {
6556 struct hwrm_func_reset_input req = {0};
6557
6558 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6559 req.enables = 0;
6560
6561 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6562 }
6563
6564 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6565 {
6566 int rc = 0;
6567 struct hwrm_queue_qportcfg_input req = {0};
6568 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
6569 u8 i, j, *qptr;
6570 bool no_rdma;
6571
6572 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6573
6574 mutex_lock(&bp->hwrm_cmd_lock);
6575 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6576 if (rc)
6577 goto qportcfg_exit;
6578
6579 if (!resp->max_configurable_queues) {
6580 rc = -EINVAL;
6581 goto qportcfg_exit;
6582 }
6583 bp->max_tc = resp->max_configurable_queues;
6584 bp->max_lltc = resp->max_configurable_lossless_queues;
6585 if (bp->max_tc > BNXT_MAX_QUEUE)
6586 bp->max_tc = BNXT_MAX_QUEUE;
6587
6588 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6589 qptr = &resp->queue_id0;
6590 for (i = 0, j = 0; i < bp->max_tc; i++) {
6591 bp->q_info[j].queue_id = *qptr;
6592 bp->q_ids[i] = *qptr++;
6593 bp->q_info[j].queue_profile = *qptr++;
6594 bp->tc_to_qidx[j] = j;
6595 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6596 (no_rdma && BNXT_PF(bp)))
6597 j++;
6598 }
6599 bp->max_q = bp->max_tc;
6600 bp->max_tc = max_t(u8, j, 1);
6601
6602 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6603 bp->max_tc = 1;
6604
6605 if (bp->max_lltc > bp->max_tc)
6606 bp->max_lltc = bp->max_tc;
6607
6608 qportcfg_exit:
6609 mutex_unlock(&bp->hwrm_cmd_lock);
6610 return rc;
6611 }
6612
6613 static int bnxt_hwrm_ver_get(struct bnxt *bp)
6614 {
6615 int rc;
6616 struct hwrm_ver_get_input req = {0};
6617 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6618 u32 dev_caps_cfg;
6619
6620 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
6621 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6622 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6623 req.hwrm_intf_min = HWRM_VERSION_MINOR;
6624 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6625 mutex_lock(&bp->hwrm_cmd_lock);
6626 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6627 if (rc)
6628 goto hwrm_ver_get_exit;
6629
6630 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6631
6632 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6633 resp->hwrm_intf_min_8b << 8 |
6634 resp->hwrm_intf_upd_8b;
6635 if (resp->hwrm_intf_maj_8b < 1) {
6636 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
6637 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6638 resp->hwrm_intf_upd_8b);
6639 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
6640 }
6641 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
6642 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6643 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
6644
6645 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6646 if (!bp->hwrm_cmd_timeout)
6647 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6648
6649 if (resp->hwrm_intf_maj_8b >= 1) {
6650 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
6651 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6652 }
6653 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6654 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
6655
6656 bp->chip_num = le16_to_cpu(resp->chip_num);
6657 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6658 !resp->chip_metal)
6659 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
6660
6661 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6662 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6663 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
6664 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
6665
6666 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
6667 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
6668
6669 if (dev_caps_cfg &
6670 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
6671 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
6672
6673 hwrm_ver_get_exit:
6674 mutex_unlock(&bp->hwrm_cmd_lock);
6675 return rc;
6676 }
6677
6678 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6679 {
6680 struct hwrm_fw_set_time_input req = {0};
6681 struct tm tm;
6682 time64_t now = ktime_get_real_seconds();
6683
6684 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6685 bp->hwrm_spec_code < 0x10400)
6686 return -EOPNOTSUPP;
6687
6688 time64_to_tm(now, 0, &tm);
6689 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6690 req.year = cpu_to_le16(1900 + tm.tm_year);
6691 req.month = 1 + tm.tm_mon;
6692 req.day = tm.tm_mday;
6693 req.hour = tm.tm_hour;
6694 req.minute = tm.tm_min;
6695 req.second = tm.tm_sec;
6696 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6697 }
6698
6699 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6700 {
6701 int rc;
6702 struct bnxt_pf_info *pf = &bp->pf;
6703 struct hwrm_port_qstats_input req = {0};
6704
6705 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6706 return 0;
6707
6708 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6709 req.port_id = cpu_to_le16(pf->port_id);
6710 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6711 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6712 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6713 return rc;
6714 }
6715
6716 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6717 {
6718 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
6719 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
6720 struct hwrm_port_qstats_ext_input req = {0};
6721 struct bnxt_pf_info *pf = &bp->pf;
6722 int rc;
6723
6724 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6725 return 0;
6726
6727 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6728 req.port_id = cpu_to_le16(pf->port_id);
6729 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6730 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
6731 req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6732 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6733 mutex_lock(&bp->hwrm_cmd_lock);
6734 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6735 if (!rc) {
6736 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6737 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6738 } else {
6739 bp->fw_rx_stats_ext_size = 0;
6740 bp->fw_tx_stats_ext_size = 0;
6741 }
6742 if (bp->fw_tx_stats_ext_size <=
6743 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6744 mutex_unlock(&bp->hwrm_cmd_lock);
6745 bp->pri2cos_valid = 0;
6746 return rc;
6747 }
6748
6749 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6750 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6751
6752 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6753 if (!rc) {
6754 struct hwrm_queue_pri2cos_qcfg_output *resp2;
6755 u8 *pri2cos;
6756 int i, j;
6757
6758 resp2 = bp->hwrm_cmd_resp_addr;
6759 pri2cos = &resp2->pri0_cos_queue_id;
6760 for (i = 0; i < 8; i++) {
6761 u8 queue_id = pri2cos[i];
6762
6763 for (j = 0; j < bp->max_q; j++) {
6764 if (bp->q_ids[j] == queue_id)
6765 bp->pri2cos[i] = j;
6766 }
6767 }
6768 bp->pri2cos_valid = 1;
6769 }
6770 mutex_unlock(&bp->hwrm_cmd_lock);
6771 return rc;
6772 }
6773
6774 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6775 {
6776 if (bp->vxlan_port_cnt) {
6777 bnxt_hwrm_tunnel_dst_port_free(
6778 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6779 }
6780 bp->vxlan_port_cnt = 0;
6781 if (bp->nge_port_cnt) {
6782 bnxt_hwrm_tunnel_dst_port_free(
6783 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6784 }
6785 bp->nge_port_cnt = 0;
6786 }
6787
6788 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6789 {
6790 int rc, i;
6791 u32 tpa_flags = 0;
6792
6793 if (set_tpa)
6794 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6795 for (i = 0; i < bp->nr_vnics; i++) {
6796 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6797 if (rc) {
6798 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
6799 i, rc);
6800 return rc;
6801 }
6802 }
6803 return 0;
6804 }
6805
6806 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6807 {
6808 int i;
6809
6810 for (i = 0; i < bp->nr_vnics; i++)
6811 bnxt_hwrm_vnic_set_rss(bp, i, false);
6812 }
6813
6814 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6815 bool irq_re_init)
6816 {
6817 if (bp->vnic_info) {
6818 bnxt_hwrm_clear_vnic_filter(bp);
6819 /* clear all RSS settings before freeing the vnic ctx */
6820 bnxt_hwrm_clear_vnic_rss(bp);
6821 bnxt_hwrm_vnic_ctx_free(bp);
6822 /* before freeing the vnic, undo the vnic tpa settings */
6823 if (bp->flags & BNXT_FLAG_TPA)
6824 bnxt_set_tpa(bp, false);
6825 bnxt_hwrm_vnic_free(bp);
6826 }
6827 bnxt_hwrm_ring_free(bp, close_path);
6828 bnxt_hwrm_ring_grp_free(bp);
6829 if (irq_re_init) {
6830 bnxt_hwrm_stat_ctx_free(bp);
6831 bnxt_hwrm_free_tunnel_ports(bp);
6832 }
6833 }
6834
6835 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6836 {
6837 struct hwrm_func_cfg_input req = {0};
6838 int rc;
6839
6840 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6841 req.fid = cpu_to_le16(0xffff);
6842 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6843 if (br_mode == BRIDGE_MODE_VEB)
6844 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6845 else if (br_mode == BRIDGE_MODE_VEPA)
6846 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6847 else
6848 return -EINVAL;
6849 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6850 if (rc)
6851 rc = -EIO;
6852 return rc;
6853 }
6854
6855 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6856 {
6857 struct hwrm_func_cfg_input req = {0};
6858 int rc;
6859
6860 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6861 return 0;
6862
6863 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6864 req.fid = cpu_to_le16(0xffff);
6865 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
6866 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
6867 if (size == 128)
6868 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
6869
6870 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6871 if (rc)
6872 rc = -EIO;
6873 return rc;
6874 }
6875
6876 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6877 {
6878 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6879 int rc;
6880
6881 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6882 goto skip_rss_ctx;
6883
6884 /* allocate context for vnic */
6885 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
6886 if (rc) {
6887 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6888 vnic_id, rc);
6889 goto vnic_setup_err;
6890 }
6891 bp->rsscos_nr_ctxs++;
6892
6893 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6894 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6895 if (rc) {
6896 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6897 vnic_id, rc);
6898 goto vnic_setup_err;
6899 }
6900 bp->rsscos_nr_ctxs++;
6901 }
6902
6903 skip_rss_ctx:
6904 /* configure default vnic, ring grp */
6905 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6906 if (rc) {
6907 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6908 vnic_id, rc);
6909 goto vnic_setup_err;
6910 }
6911
6912 /* Enable RSS hashing on vnic */
6913 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6914 if (rc) {
6915 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6916 vnic_id, rc);
6917 goto vnic_setup_err;
6918 }
6919
6920 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6921 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6922 if (rc) {
6923 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6924 vnic_id, rc);
6925 }
6926 }
6927
6928 vnic_setup_err:
6929 return rc;
6930 }
6931
6932 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6933 {
6934 int rc, i, nr_ctxs;
6935
6936 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6937 for (i = 0; i < nr_ctxs; i++) {
6938 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6939 if (rc) {
6940 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6941 vnic_id, i, rc);
6942 break;
6943 }
6944 bp->rsscos_nr_ctxs++;
6945 }
6946 if (i < nr_ctxs)
6947 return -ENOMEM;
6948
6949 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6950 if (rc) {
6951 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6952 vnic_id, rc);
6953 return rc;
6954 }
6955 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6956 if (rc) {
6957 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6958 vnic_id, rc);
6959 return rc;
6960 }
6961 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6962 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6963 if (rc) {
6964 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6965 vnic_id, rc);
6966 }
6967 }
6968 return rc;
6969 }
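/* Illustrative note: on P5 chips the RSS table spans one context per
 * 64 RX rings, so the loop above allocates DIV_ROUND_UP(rx_nr_rings, 64)
 * contexts before programming RSS and the VNIC itself; e.g. 100 RX
 * rings need two contexts.
 */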
6970
6971 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6972 {
6973 if (bp->flags & BNXT_FLAG_CHIP_P5)
6974 return __bnxt_setup_vnic_p5(bp, vnic_id);
6975 else
6976 return __bnxt_setup_vnic(bp, vnic_id);
6977 }
6978
6979 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
6980 {
6981 #ifdef CONFIG_RFS_ACCEL
6982 int i, rc = 0;
6983
6984 for (i = 0; i < bp->rx_nr_rings; i++) {
6985 struct bnxt_vnic_info *vnic;
6986 u16 vnic_id = i + 1;
6987 u16 ring_id = i;
6988
6989 if (vnic_id >= bp->nr_vnics)
6990 break;
6991
6992 vnic = &bp->vnic_info[vnic_id];
6993 vnic->flags |= BNXT_VNIC_RFS_FLAG;
6994 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6995 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
6996 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
6997 if (rc) {
6998 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6999 vnic_id, rc);
7000 break;
7001 }
7002 rc = bnxt_setup_vnic(bp, vnic_id);
7003 if (rc)
7004 break;
7005 }
7006 return rc;
7007 #else
7008 return 0;
7009 #endif
7010 }
7011
7012 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7013 static bool bnxt_promisc_ok(struct bnxt *bp)
7014 {
7015 #ifdef CONFIG_BNXT_SRIOV
7016 if (BNXT_VF(bp) && !bp->vf.vlan)
7017 return false;
7018 #endif
7019 return true;
7020 }
7021
7022 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7023 {
7024 int rc;
7025
7026 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7027 if (rc) {
7028 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7029 rc);
7030 return rc;
7031 }
7032
7033 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7034 if (rc) {
7035 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7036 rc);
7037 return rc;
7038 }
7039 return rc;
7040 }
7041
7042 static int bnxt_cfg_rx_mode(struct bnxt *);
7043 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7044
7045 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7046 {
7047 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7048 int rc = 0;
7049 unsigned int rx_nr_rings = bp->rx_nr_rings;
7050
7051 if (irq_re_init) {
7052 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7053 if (rc) {
7054 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7055 rc);
7056 goto err_out;
7057 }
7058 }
7059
7060 rc = bnxt_hwrm_ring_alloc(bp);
7061 if (rc) {
7062 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7063 goto err_out;
7064 }
7065
7066 rc = bnxt_hwrm_ring_grp_alloc(bp);
7067 if (rc) {
7068 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7069 goto err_out;
7070 }
7071
7072 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7073 rx_nr_rings--;
7074
7075 /* default vnic 0 */
7076 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7077 if (rc) {
7078 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7079 goto err_out;
7080 }
7081
7082 rc = bnxt_setup_vnic(bp, 0);
7083 if (rc)
7084 goto err_out;
7085
7086 if (bp->flags & BNXT_FLAG_RFS) {
7087 rc = bnxt_alloc_rfs_vnics(bp);
7088 if (rc)
7089 goto err_out;
7090 }
7091
7092 if (bp->flags & BNXT_FLAG_TPA) {
7093 rc = bnxt_set_tpa(bp, true);
7094 if (rc)
7095 goto err_out;
7096 }
7097
7098 if (BNXT_VF(bp))
7099 bnxt_update_vf_mac(bp);
7100
7101 /* Filter for default vnic 0 */
7102 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7103 if (rc) {
7104 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7105 goto err_out;
7106 }
7107 vnic->uc_filter_count = 1;
7108
7109 vnic->rx_mask = 0;
7110 if (bp->dev->flags & IFF_BROADCAST)
7111 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7112
7113 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7114 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7115
7116 if (bp->dev->flags & IFF_ALLMULTI) {
7117 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7118 vnic->mc_list_count = 0;
7119 } else {
7120 u32 mask = 0;
7121
7122 bnxt_mc_list_updated(bp, &mask);
7123 vnic->rx_mask |= mask;
7124 }
7125
7126 rc = bnxt_cfg_rx_mode(bp);
7127 if (rc)
7128 goto err_out;
7129
7130 rc = bnxt_hwrm_set_coal(bp);
7131 if (rc)
7132 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7133 rc);
7134
7135 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7136 rc = bnxt_setup_nitroa0_vnic(bp);
7137 if (rc)
7138 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7139 rc);
7140 }
7141
7142 if (BNXT_VF(bp)) {
7143 bnxt_hwrm_func_qcfg(bp);
7144 netdev_update_features(bp->dev);
7145 }
7146
7147 return 0;
7148
7149 err_out:
7150 bnxt_hwrm_resource_free(bp, 0, true);
7151
7152 return rc;
7153 }
7154
7155 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7156 {
7157 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7158 return 0;
7159 }
7160
7161 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7162 {
7163 bnxt_init_cp_rings(bp);
7164 bnxt_init_rx_rings(bp);
7165 bnxt_init_tx_rings(bp);
7166 bnxt_init_ring_grps(bp, irq_re_init);
7167 bnxt_init_vnics(bp);
7168
7169 return bnxt_init_chip(bp, irq_re_init);
7170 }
7171
7172 static int bnxt_set_real_num_queues(struct bnxt *bp)
7173 {
7174 int rc;
7175 struct net_device *dev = bp->dev;
7176
7177 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7178 bp->tx_nr_rings_xdp);
7179 if (rc)
7180 return rc;
7181
7182 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7183 if (rc)
7184 return rc;
7185
7186 #ifdef CONFIG_RFS_ACCEL
7187 if (bp->flags & BNXT_FLAG_RFS)
7188 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7189 #endif
7190
7191 return rc;
7192 }
7193
7194 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7195 bool shared)
7196 {
7197 int _rx = *rx, _tx = *tx;
7198
7199 if (shared) {
7200 *rx = min_t(int, _rx, max);
7201 *tx = min_t(int, _tx, max);
7202 } else {
7203 if (max < 2)
7204 return -ENOMEM;
7205
7206 while (_rx + _tx > max) {
7207 if (_rx > _tx && _rx > 1)
7208 _rx--;
7209 else if (_tx > 1)
7210 _tx--;
7211 }
7212 *rx = _rx;
7213 *tx = _tx;
7214 }
7215 return 0;
7216 }
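/* Illustrative example: in the non-shared case, RX and TX are trimmed
 * alternately (largest first) until their sum fits the budget.  With
 * max = 8 and rx = tx = 6, the loop settles on rx = 4 and tx = 4.
 */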
7217
7218 static void bnxt_setup_msix(struct bnxt *bp)
7219 {
7220 const int len = sizeof(bp->irq_tbl[0].name);
7221 struct net_device *dev = bp->dev;
7222 int tcs, i;
7223
7224 tcs = netdev_get_num_tc(dev);
7225 if (tcs > 1) {
7226 int i, off, count;
7227
7228 for (i = 0; i < tcs; i++) {
7229 count = bp->tx_nr_rings_per_tc;
7230 off = i * count;
7231 netdev_set_tc_queue(dev, i, count, off);
7232 }
7233 }
7234
7235 for (i = 0; i < bp->cp_nr_rings; i++) {
7236 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7237 char *attr;
7238
7239 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7240 attr = "TxRx";
7241 else if (i < bp->rx_nr_rings)
7242 attr = "rx";
7243 else
7244 attr = "tx";
7245
7246 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7247 attr, i);
7248 bp->irq_tbl[map_idx].handler = bnxt_msix;
7249 }
7250 }
7251
7252 static void bnxt_setup_inta(struct bnxt *bp)
7253 {
7254 const int len = sizeof(bp->irq_tbl[0].name);
7255
7256 if (netdev_get_num_tc(bp->dev))
7257 netdev_reset_tc(bp->dev);
7258
7259 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7260 0);
7261 bp->irq_tbl[0].handler = bnxt_inta;
7262 }
7263
7264 static int bnxt_setup_int_mode(struct bnxt *bp)
7265 {
7266 int rc;
7267
7268 if (bp->flags & BNXT_FLAG_USING_MSIX)
7269 bnxt_setup_msix(bp);
7270 else
7271 bnxt_setup_inta(bp);
7272
7273 rc = bnxt_set_real_num_queues(bp);
7274 return rc;
7275 }
7276
7277 #ifdef CONFIG_RFS_ACCEL
7278 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7279 {
7280 return bp->hw_resc.max_rsscos_ctxs;
7281 }
7282
7283 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7284 {
7285 return bp->hw_resc.max_vnics;
7286 }
7287 #endif
7288
7289 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7290 {
7291 return bp->hw_resc.max_stat_ctxs;
7292 }
7293
7294 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7295 {
7296 return bp->hw_resc.max_cp_rings;
7297 }
7298
7299 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7300 {
7301 unsigned int cp = bp->hw_resc.max_cp_rings;
7302
7303 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7304 cp -= bnxt_get_ulp_msix_num(bp);
7305
7306 return cp;
7307 }
7308
7309 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7310 {
7311 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7312
7313 if (bp->flags & BNXT_FLAG_CHIP_P5)
7314 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7315
7316 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7317 }
7318
7319 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7320 {
7321 bp->hw_resc.max_irqs = max_irqs;
7322 }
7323
7324 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7325 {
7326 unsigned int cp;
7327
7328 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7329 if (bp->flags & BNXT_FLAG_CHIP_P5)
7330 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7331 else
7332 return cp - bp->cp_nr_rings;
7333 }
7334
7335 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7336 {
7337 unsigned int stat;
7338
7339 stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7340 stat -= bp->cp_nr_rings;
7341 return stat;
7342 }
7343
7344 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7345 {
7346 int max_cp = bnxt_get_max_func_cp_rings(bp);
7347 int max_irq = bnxt_get_max_func_irqs(bp);
7348 int total_req = bp->cp_nr_rings + num;
7349 int max_idx, avail_msix;
7350
7351 max_idx = bp->total_irqs;
7352 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7353 max_idx = min_t(int, bp->total_irqs, max_cp);
7354 avail_msix = max_idx - bp->cp_nr_rings;
7355 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
7356 return avail_msix;
7357
7358 if (max_irq < total_req) {
7359 num = max_irq - bp->cp_nr_rings;
7360 if (num <= 0)
7361 return 0;
7362 }
7363 return num;
7364 }
7365
7366 static int bnxt_get_num_msix(struct bnxt *bp)
7367 {
7368 if (!BNXT_NEW_RM(bp))
7369 return bnxt_get_max_func_irqs(bp);
7370
7371 return bnxt_nq_rings_in_use(bp);
7372 }
7373
7374 static int bnxt_init_msix(struct bnxt *bp)
7375 {
7376 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7377 struct msix_entry *msix_ent;
7378
7379 total_vecs = bnxt_get_num_msix(bp);
7380 max = bnxt_get_max_func_irqs(bp);
7381 if (total_vecs > max)
7382 total_vecs = max;
7383
7384 if (!total_vecs)
7385 return 0;
7386
7387 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7388 if (!msix_ent)
7389 return -ENOMEM;
7390
7391 for (i = 0; i < total_vecs; i++) {
7392 msix_ent[i].entry = i;
7393 msix_ent[i].vector = 0;
7394 }
7395
7396 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7397 min = 2;
7398
7399 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
7400 ulp_msix = bnxt_get_ulp_msix_num(bp);
7401 if (total_vecs < 0 || total_vecs < ulp_msix) {
7402 rc = -ENODEV;
7403 goto msix_setup_exit;
7404 }
7405
7406 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7407 if (bp->irq_tbl) {
7408 for (i = 0; i < total_vecs; i++)
7409 bp->irq_tbl[i].vector = msix_ent[i].vector;
7410
7411 bp->total_irqs = total_vecs;
7412 /* Trim rings based on the number of vectors allocated */
7413 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
7414 total_vecs - ulp_msix, min == 1);
7415 if (rc)
7416 goto msix_setup_exit;
7417
7418 bp->cp_nr_rings = (min == 1) ?
7419 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7420 bp->tx_nr_rings + bp->rx_nr_rings;
7421
7422 } else {
7423 rc = -ENOMEM;
7424 goto msix_setup_exit;
7425 }
7426 bp->flags |= BNXT_FLAG_USING_MSIX;
7427 kfree(msix_ent);
7428 return 0;
7429
7430 msix_setup_exit:
7431 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7432 kfree(bp->irq_tbl);
7433 bp->irq_tbl = NULL;
7434 pci_disable_msix(bp->pdev);
7435 kfree(msix_ent);
7436 return rc;
7437 }
7438
7439 static int bnxt_init_inta(struct bnxt *bp)
7440 {
7441 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7442 if (!bp->irq_tbl)
7443 return -ENOMEM;
7444
7445 bp->total_irqs = 1;
7446 bp->rx_nr_rings = 1;
7447 bp->tx_nr_rings = 1;
7448 bp->cp_nr_rings = 1;
7449 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7450 bp->irq_tbl[0].vector = bp->pdev->irq;
7451 return 0;
7452 }
7453
7454 static int bnxt_init_int_mode(struct bnxt *bp)
7455 {
7456 int rc = 0;
7457
7458 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7459 rc = bnxt_init_msix(bp);
7460
7461 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
7462 /* fallback to INTA */
7463 rc = bnxt_init_inta(bp);
7464 }
7465 return rc;
7466 }
7467
7468 static void bnxt_clear_int_mode(struct bnxt *bp)
7469 {
7470 if (bp->flags & BNXT_FLAG_USING_MSIX)
7471 pci_disable_msix(bp->pdev);
7472
7473 kfree(bp->irq_tbl);
7474 bp->irq_tbl = NULL;
7475 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7476 }
7477
7478 int bnxt_reserve_rings(struct bnxt *bp)
7479 {
7480 int tcs = netdev_get_num_tc(bp->dev);
7481 bool reinit_irq = false;
7482 int rc;
7483
7484 if (!bnxt_need_reserve_rings(bp))
7485 return 0;
7486
7487 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
7488 bnxt_ulp_irq_stop(bp);
7489 bnxt_clear_int_mode(bp);
7490 reinit_irq = true;
7491 }
7492 rc = __bnxt_reserve_rings(bp);
7493 if (reinit_irq) {
7494 if (!rc)
7495 rc = bnxt_init_int_mode(bp);
7496 bnxt_ulp_irq_restart(bp, rc);
7497 }
7498 if (rc) {
7499 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7500 return rc;
7501 }
7502 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7503 netdev_err(bp->dev, "tx ring reservation failure\n");
7504 netdev_reset_tc(bp->dev);
7505 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7506 return -ENOMEM;
7507 }
7508 return 0;
7509 }
7510
7511 static void bnxt_free_irq(struct bnxt *bp)
7512 {
7513 struct bnxt_irq *irq;
7514 int i;
7515
7516 #ifdef CONFIG_RFS_ACCEL
7517 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7518 bp->dev->rx_cpu_rmap = NULL;
7519 #endif
7520 if (!bp->irq_tbl || !bp->bnapi)
7521 return;
7522
7523 for (i = 0; i < bp->cp_nr_rings; i++) {
7524 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7525
7526 irq = &bp->irq_tbl[map_idx];
7527 if (irq->requested) {
7528 if (irq->have_cpumask) {
7529 irq_set_affinity_hint(irq->vector, NULL);
7530 free_cpumask_var(irq->cpu_mask);
7531 irq->have_cpumask = 0;
7532 }
7533 free_irq(irq->vector, bp->bnapi[i]);
7534 }
7535
7536 irq->requested = 0;
7537 }
7538 }
7539
7540 static int bnxt_request_irq(struct bnxt *bp)
7541 {
7542 int i, j, rc = 0;
7543 unsigned long flags = 0;
7544 #ifdef CONFIG_RFS_ACCEL
7545 struct cpu_rmap *rmap;
7546 #endif
7547
7548 rc = bnxt_setup_int_mode(bp);
7549 if (rc) {
7550 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7551 rc);
7552 return rc;
7553 }
7554 #ifdef CONFIG_RFS_ACCEL
7555 rmap = bp->dev->rx_cpu_rmap;
7556 #endif
7557 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7558 flags = IRQF_SHARED;
7559
7560 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
7561 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7562 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7563
7564 #ifdef CONFIG_RFS_ACCEL
7565 if (rmap && bp->bnapi[i]->rx_ring) {
7566 rc = irq_cpu_rmap_add(rmap, irq->vector);
7567 if (rc)
7568 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
7569 j);
7570 j++;
7571 }
7572 #endif
7573 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7574 bp->bnapi[i]);
7575 if (rc)
7576 break;
7577
7578 irq->requested = 1;
7579
7580 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7581 int numa_node = dev_to_node(&bp->pdev->dev);
7582
7583 irq->have_cpumask = 1;
7584 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7585 irq->cpu_mask);
7586 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7587 if (rc) {
7588 netdev_warn(bp->dev,
7589 "Set affinity failed, IRQ = %d\n",
7590 irq->vector);
7591 break;
7592 }
7593 }
7594 }
7595 return rc;
7596 }
7597
7598 static void bnxt_del_napi(struct bnxt *bp)
7599 {
7600 int i;
7601
7602 if (!bp->bnapi)
7603 return;
7604
7605 for (i = 0; i < bp->cp_nr_rings; i++) {
7606 struct bnxt_napi *bnapi = bp->bnapi[i];
7607
7608 napi_hash_del(&bnapi->napi);
7609 netif_napi_del(&bnapi->napi);
7610 }
7611 	/* Since we called napi_hash_del() before netif_napi_del(), we need
7612 	 * to respect an RCU grace period before freeing the napi structures.
7613 */
7614 synchronize_net();
7615 }
7616
7617 static void bnxt_init_napi(struct bnxt *bp)
7618 {
7619 int i;
7620 unsigned int cp_nr_rings = bp->cp_nr_rings;
7621 struct bnxt_napi *bnapi;
7622
7623 if (bp->flags & BNXT_FLAG_USING_MSIX) {
7624 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7625
7626 if (bp->flags & BNXT_FLAG_CHIP_P5)
7627 poll_fn = bnxt_poll_p5;
7628 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7629 cp_nr_rings--;
7630 for (i = 0; i < cp_nr_rings; i++) {
7631 bnapi = bp->bnapi[i];
7632 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
7633 }
7634 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7635 bnapi = bp->bnapi[cp_nr_rings];
7636 netif_napi_add(bp->dev, &bnapi->napi,
7637 bnxt_poll_nitroa0, 64);
7638 }
7639 } else {
7640 bnapi = bp->bnapi[0];
7641 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
7642 }
7643 }
7644
7645 static void bnxt_disable_napi(struct bnxt *bp)
7646 {
7647 int i;
7648
7649 if (!bp->bnapi)
7650 return;
7651
7652 for (i = 0; i < bp->cp_nr_rings; i++) {
7653 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7654
7655 if (bp->bnapi[i]->rx_ring)
7656 cancel_work_sync(&cpr->dim.work);
7657
7658 napi_disable(&bp->bnapi[i]->napi);
7659 }
7660 }
7661
7662 static void bnxt_enable_napi(struct bnxt *bp)
7663 {
7664 int i;
7665
7666 for (i = 0; i < bp->cp_nr_rings; i++) {
7667 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7668 bp->bnapi[i]->in_reset = false;
7669
7670 if (bp->bnapi[i]->rx_ring) {
7671 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7672 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7673 }
7674 napi_enable(&bp->bnapi[i]->napi);
7675 }
7676 }
7677
7678 void bnxt_tx_disable(struct bnxt *bp)
7679 {
7680 int i;
7681 struct bnxt_tx_ring_info *txr;
7682
7683 if (bp->tx_ring) {
7684 for (i = 0; i < bp->tx_nr_rings; i++) {
7685 txr = &bp->tx_ring[i];
7686 txr->dev_state = BNXT_DEV_STATE_CLOSING;
7687 }
7688 }
7689 /* Stop all TX queues */
7690 netif_tx_disable(bp->dev);
7691 netif_carrier_off(bp->dev);
7692 }
7693
7694 void bnxt_tx_enable(struct bnxt *bp)
7695 {
7696 int i;
7697 struct bnxt_tx_ring_info *txr;
7698
7699 for (i = 0; i < bp->tx_nr_rings; i++) {
7700 txr = &bp->tx_ring[i];
7701 txr->dev_state = 0;
7702 }
7703 netif_tx_wake_all_queues(bp->dev);
7704 if (bp->link_info.link_up)
7705 netif_carrier_on(bp->dev);
7706 }
7707
7708 static void bnxt_report_link(struct bnxt *bp)
7709 {
7710 if (bp->link_info.link_up) {
7711 const char *duplex;
7712 const char *flow_ctrl;
7713 u32 speed;
7714 u16 fec;
7715
7716 netif_carrier_on(bp->dev);
7717 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7718 duplex = "full";
7719 else
7720 duplex = "half";
7721 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7722 flow_ctrl = "ON - receive & transmit";
7723 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7724 flow_ctrl = "ON - transmit";
7725 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7726 flow_ctrl = "ON - receive";
7727 else
7728 flow_ctrl = "none";
7729 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
7730 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
7731 speed, duplex, flow_ctrl);
7732 if (bp->flags & BNXT_FLAG_EEE_CAP)
7733 netdev_info(bp->dev, "EEE is %s\n",
7734 bp->eee.eee_active ? "active" :
7735 "not active");
7736 fec = bp->link_info.fec_cfg;
7737 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7738 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7739 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7740 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7741 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
7742 } else {
7743 netif_carrier_off(bp->dev);
7744 netdev_err(bp->dev, "NIC Link is Down\n");
7745 }
7746 }
7747
7748 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7749 {
7750 int rc = 0;
7751 struct hwrm_port_phy_qcaps_input req = {0};
7752 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7753 struct bnxt_link_info *link_info = &bp->link_info;
7754
7755 if (bp->hwrm_spec_code < 0x10201)
7756 return 0;
7757
7758 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7759
7760 mutex_lock(&bp->hwrm_cmd_lock);
7761 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7762 if (rc)
7763 goto hwrm_phy_qcaps_exit;
7764
7765 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
7766 struct ethtool_eee *eee = &bp->eee;
7767 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7768
7769 bp->flags |= BNXT_FLAG_EEE_CAP;
7770 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7771 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7772 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7773 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7774 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7775 }
7776 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7777 if (bp->test_info)
7778 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7779 }
7780 if (resp->supported_speeds_auto_mode)
7781 link_info->support_auto_speeds =
7782 le16_to_cpu(resp->supported_speeds_auto_mode);
7783
7784 bp->port_count = resp->port_cnt;
7785
7786 hwrm_phy_qcaps_exit:
7787 mutex_unlock(&bp->hwrm_cmd_lock);
7788 return rc;
7789 }
7790
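/* Query PORT_PHY_QCFG and refresh the cached link state in bp->link_info.
 * When chng_link_state is set, the carrier state is updated and any change is
 * logged via bnxt_report_link(); otherwise the link is treated as down.
 */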
7791 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7792 {
7793 int rc = 0;
7794 struct bnxt_link_info *link_info = &bp->link_info;
7795 struct hwrm_port_phy_qcfg_input req = {0};
7796 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7797 u8 link_up = link_info->link_up;
7798 u16 diff;
7799
7800 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7801
7802 mutex_lock(&bp->hwrm_cmd_lock);
7803 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7804 if (rc) {
7805 mutex_unlock(&bp->hwrm_cmd_lock);
7806 return rc;
7807 }
7808
7809 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7810 link_info->phy_link_status = resp->link;
7811 link_info->duplex = resp->duplex_cfg;
7812 if (bp->hwrm_spec_code >= 0x10800)
7813 link_info->duplex = resp->duplex_state;
7814 link_info->pause = resp->pause;
7815 link_info->auto_mode = resp->auto_mode;
7816 link_info->auto_pause_setting = resp->auto_pause;
7817 link_info->lp_pause = resp->link_partner_adv_pause;
7818 link_info->force_pause_setting = resp->force_pause;
7819 link_info->duplex_setting = resp->duplex_cfg;
7820 if (link_info->phy_link_status == BNXT_LINK_LINK)
7821 link_info->link_speed = le16_to_cpu(resp->link_speed);
7822 else
7823 link_info->link_speed = 0;
7824 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
7825 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7826 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
7827 link_info->lp_auto_link_speeds =
7828 le16_to_cpu(resp->link_partner_adv_speeds);
7829 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7830 link_info->phy_ver[0] = resp->phy_maj;
7831 link_info->phy_ver[1] = resp->phy_min;
7832 link_info->phy_ver[2] = resp->phy_bld;
7833 link_info->media_type = resp->media_type;
7834 link_info->phy_type = resp->phy_type;
7835 link_info->transceiver = resp->xcvr_pkg_type;
7836 link_info->phy_addr = resp->eee_config_phy_addr &
7837 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
7838 link_info->module_status = resp->module_status;
7839
7840 if (bp->flags & BNXT_FLAG_EEE_CAP) {
7841 struct ethtool_eee *eee = &bp->eee;
7842 u16 fw_speeds;
7843
7844 eee->eee_active = 0;
7845 if (resp->eee_config_phy_addr &
7846 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7847 eee->eee_active = 1;
7848 fw_speeds = le16_to_cpu(
7849 resp->link_partner_adv_eee_link_speed_mask);
7850 eee->lp_advertised =
7851 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7852 }
7853
7854 /* Pull initial EEE config */
7855 if (!chng_link_state) {
7856 if (resp->eee_config_phy_addr &
7857 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7858 eee->eee_enabled = 1;
7859
7860 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7861 eee->advertised =
7862 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7863
7864 if (resp->eee_config_phy_addr &
7865 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7866 __le32 tmr;
7867
7868 eee->tx_lpi_enabled = 1;
7869 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7870 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7871 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7872 }
7873 }
7874 }
7875
7876 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7877 if (bp->hwrm_spec_code >= 0x10504)
7878 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7879
7880 /* TODO: need to add more logic to report VF link */
7881 if (chng_link_state) {
7882 if (link_info->phy_link_status == BNXT_LINK_LINK)
7883 link_info->link_up = 1;
7884 else
7885 link_info->link_up = 0;
7886 if (link_up != link_info->link_up)
7887 bnxt_report_link(bp);
7888 } else {
7889 		/* always link down if not required to update link state */
7890 link_info->link_up = 0;
7891 }
7892 mutex_unlock(&bp->hwrm_cmd_lock);
7893
7894 if (!BNXT_SINGLE_PF(bp))
7895 return 0;
7896
7897 diff = link_info->support_auto_speeds ^ link_info->advertising;
7898 if ((link_info->support_auto_speeds | diff) !=
7899 link_info->support_auto_speeds) {
7900 /* An advertised speed is no longer supported, so we need to
7901 * update the advertisement settings. Caller holds RTNL
7902 * so we can modify link settings.
7903 */
7904 link_info->advertising = link_info->support_auto_speeds;
7905 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
7906 bnxt_hwrm_set_link_setting(bp, true, false);
7907 }
7908 return 0;
7909 }
7910
7911 static void bnxt_get_port_module_status(struct bnxt *bp)
7912 {
7913 struct bnxt_link_info *link_info = &bp->link_info;
7914 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7915 u8 module_status;
7916
7917 if (bnxt_update_link(bp, true))
7918 return;
7919
7920 module_status = link_info->module_status;
7921 switch (module_status) {
7922 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7923 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7924 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7925 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7926 bp->pf.port_id);
7927 if (bp->hwrm_spec_code >= 0x10201) {
7928 netdev_warn(bp->dev, "Module part number %s\n",
7929 resp->phy_vendor_partnumber);
7930 }
7931 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7932 netdev_warn(bp->dev, "TX is disabled\n");
7933 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7934 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7935 }
7936 }
7937
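/* Fill in the pause fields of a PORT_PHY_CFG request.  With autoneg flow
 * control the advertised pause bits are set; otherwise the pause mode is
 * forced (and mirrored into auto_pause on HWRM spec 1.2.1 and later).
 */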
7938 static void
7939 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7940 {
7941 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
7942 if (bp->hwrm_spec_code >= 0x10201)
7943 req->auto_pause =
7944 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
7945 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7946 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7947 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7948 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
7949 req->enables |=
7950 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7951 } else {
7952 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7953 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7954 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7955 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7956 req->enables |=
7957 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
7958 if (bp->hwrm_spec_code >= 0x10201) {
7959 req->auto_pause = req->force_pause;
7960 req->enables |= cpu_to_le32(
7961 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7962 }
7963 }
7964 }
7965
7966 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7967 struct hwrm_port_phy_cfg_input *req)
7968 {
7969 u8 autoneg = bp->link_info.autoneg;
7970 u16 fw_link_speed = bp->link_info.req_link_speed;
7971 u16 advertising = bp->link_info.advertising;
7972
7973 if (autoneg & BNXT_AUTONEG_SPEED) {
7974 req->auto_mode |=
7975 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
7976
7977 req->enables |= cpu_to_le32(
7978 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7979 req->auto_link_speed_mask = cpu_to_le16(advertising);
7980
7981 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7982 req->flags |=
7983 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
7984 } else {
7985 req->force_link_speed = cpu_to_le16(fw_link_speed);
7986 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
7987 }
7988
7989 	/* tell the firmware (ChiMP) that the setting takes effect immediately */
7990 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
7991 }
7992
7993 int bnxt_hwrm_set_pause(struct bnxt *bp)
7994 {
7995 struct hwrm_port_phy_cfg_input req = {0};
7996 int rc;
7997
7998 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7999 bnxt_hwrm_set_pause_common(bp, &req);
8000
8001 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8002 bp->link_info.force_link_chng)
8003 bnxt_hwrm_set_link_common(bp, &req);
8004
8005 mutex_lock(&bp->hwrm_cmd_lock);
8006 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8007 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8008 		/* Since changing the pause setting doesn't trigger any link
8009 		 * change event, the driver needs to update the current pause
8010 		 * result upon successful return of the phy_cfg command.
8011 */
8012 bp->link_info.pause =
8013 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8014 bp->link_info.auto_pause_setting = 0;
8015 if (!bp->link_info.force_link_chng)
8016 bnxt_report_link(bp);
8017 }
8018 bp->link_info.force_link_chng = false;
8019 mutex_unlock(&bp->hwrm_cmd_lock);
8020 return rc;
8021 }
8022
8023 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8024 struct hwrm_port_phy_cfg_input *req)
8025 {
8026 struct ethtool_eee *eee = &bp->eee;
8027
8028 if (eee->eee_enabled) {
8029 u16 eee_speeds;
8030 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8031
8032 if (eee->tx_lpi_enabled)
8033 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8034 else
8035 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8036
8037 req->flags |= cpu_to_le32(flags);
8038 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8039 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8040 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8041 } else {
8042 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8043 }
8044 }
8045
8046 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8047 {
8048 struct hwrm_port_phy_cfg_input req = {0};
8049
8050 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8051 if (set_pause)
8052 bnxt_hwrm_set_pause_common(bp, &req);
8053
8054 bnxt_hwrm_set_link_common(bp, &req);
8055
8056 if (set_eee)
8057 bnxt_hwrm_set_eee(bp, &req);
8058 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8059 }
8060
8061 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8062 {
8063 struct hwrm_port_phy_cfg_input req = {0};
8064
8065 if (!BNXT_SINGLE_PF(bp))
8066 return 0;
8067
8068 if (pci_num_vf(bp->pdev))
8069 return 0;
8070
8071 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8072 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8073 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8074 }
8075
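/* Notify the firmware of an interface up/down transition.  If the firmware
 * reports that resources changed while the interface was down, the cached
 * reservations are cleared so that rings get reserved again on the next open.
 */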
8076 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8077 {
8078 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8079 struct hwrm_func_drv_if_change_input req = {0};
8080 bool resc_reinit = false;
8081 int rc;
8082
8083 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8084 return 0;
8085
8086 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8087 if (up)
8088 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8089 mutex_lock(&bp->hwrm_cmd_lock);
8090 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8091 if (!rc && (resp->flags &
8092 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8093 resc_reinit = true;
8094 mutex_unlock(&bp->hwrm_cmd_lock);
8095
8096 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8097 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8098
8099 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8100 hw_resc->resv_cp_rings = 0;
8101 hw_resc->resv_stat_ctxs = 0;
8102 hw_resc->resv_irqs = 0;
8103 hw_resc->resv_tx_rings = 0;
8104 hw_resc->resv_rx_rings = 0;
8105 hw_resc->resv_hw_ring_grps = 0;
8106 hw_resc->resv_vnics = 0;
8107 bp->tx_nr_rings = 0;
8108 bp->rx_nr_rings = 0;
8109 }
8110 return rc;
8111 }
8112
8113 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8114 {
8115 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8116 struct hwrm_port_led_qcaps_input req = {0};
8117 struct bnxt_pf_info *pf = &bp->pf;
8118 int rc;
8119
8120 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8121 return 0;
8122
8123 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8124 req.port_id = cpu_to_le16(pf->port_id);
8125 mutex_lock(&bp->hwrm_cmd_lock);
8126 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8127 if (rc) {
8128 mutex_unlock(&bp->hwrm_cmd_lock);
8129 return rc;
8130 }
8131 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8132 int i;
8133
8134 bp->num_leds = resp->num_leds;
8135 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8136 bp->num_leds);
8137 for (i = 0; i < bp->num_leds; i++) {
8138 struct bnxt_led_info *led = &bp->leds[i];
8139 __le16 caps = led->led_state_caps;
8140
8141 if (!led->led_group_id ||
8142 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8143 bp->num_leds = 0;
8144 break;
8145 }
8146 }
8147 }
8148 mutex_unlock(&bp->hwrm_cmd_lock);
8149 return 0;
8150 }
8151
8152 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8153 {
8154 struct hwrm_wol_filter_alloc_input req = {0};
8155 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8156 int rc;
8157
8158 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8159 req.port_id = cpu_to_le16(bp->pf.port_id);
8160 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8161 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8162 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8163 mutex_lock(&bp->hwrm_cmd_lock);
8164 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8165 if (!rc)
8166 bp->wol_filter_id = resp->wol_filter_id;
8167 mutex_unlock(&bp->hwrm_cmd_lock);
8168 return rc;
8169 }
8170
8171 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8172 {
8173 struct hwrm_wol_filter_free_input req = {0};
8174 int rc;
8175
8176 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8177 req.port_id = cpu_to_le16(bp->pf.port_id);
8178 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8179 req.wol_filter_id = bp->wol_filter_id;
8180 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8181 return rc;
8182 }
8183
8184 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8185 {
8186 struct hwrm_wol_filter_qcfg_input req = {0};
8187 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8188 u16 next_handle = 0;
8189 int rc;
8190
8191 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8192 req.port_id = cpu_to_le16(bp->pf.port_id);
8193 req.handle = cpu_to_le16(handle);
8194 mutex_lock(&bp->hwrm_cmd_lock);
8195 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8196 if (!rc) {
8197 next_handle = le16_to_cpu(resp->next_handle);
8198 if (next_handle != 0) {
8199 if (resp->wol_type ==
8200 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8201 bp->wol = 1;
8202 bp->wol_filter_id = resp->wol_filter_id;
8203 }
8204 }
8205 }
8206 mutex_unlock(&bp->hwrm_cmd_lock);
8207 return next_handle;
8208 }
8209
8210 static void bnxt_get_wol_settings(struct bnxt *bp)
8211 {
8212 u16 handle = 0;
8213
8214 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8215 return;
8216
8217 do {
8218 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8219 } while (handle && handle != 0xffff);
8220 }
8221
8222 #ifdef CONFIG_BNXT_HWMON
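/* hwmon temp1_input handler: query the chip temperature via
 * HWRM_TEMP_MONITOR_QUERY and report it in millidegrees Celsius.
 */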
8223 static ssize_t bnxt_show_temp(struct device *dev,
8224 struct device_attribute *devattr, char *buf)
8225 {
8226 struct hwrm_temp_monitor_query_input req = {0};
8227 struct hwrm_temp_monitor_query_output *resp;
8228 struct bnxt *bp = dev_get_drvdata(dev);
8229 u32 temp = 0;
8230
8231 resp = bp->hwrm_cmd_resp_addr;
8232 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8233 mutex_lock(&bp->hwrm_cmd_lock);
8234 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8235 temp = resp->temp * 1000; /* display millidegree */
8236 mutex_unlock(&bp->hwrm_cmd_lock);
8237
8238 return sprintf(buf, "%u\n", temp);
8239 }
8240 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8241
8242 static struct attribute *bnxt_attrs[] = {
8243 &sensor_dev_attr_temp1_input.dev_attr.attr,
8244 NULL
8245 };
8246 ATTRIBUTE_GROUPS(bnxt);
8247
8248 static void bnxt_hwmon_close(struct bnxt *bp)
8249 {
8250 if (bp->hwmon_dev) {
8251 hwmon_device_unregister(bp->hwmon_dev);
8252 bp->hwmon_dev = NULL;
8253 }
8254 }
8255
8256 static void bnxt_hwmon_open(struct bnxt *bp)
8257 {
8258 struct pci_dev *pdev = bp->pdev;
8259
8260 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8261 DRV_MODULE_NAME, bp,
8262 bnxt_groups);
8263 if (IS_ERR(bp->hwmon_dev)) {
8264 bp->hwmon_dev = NULL;
8265 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8266 }
8267 }
8268 #else
8269 static void bnxt_hwmon_close(struct bnxt *bp)
8270 {
8271 }
8272
8273 static void bnxt_hwmon_open(struct bnxt *bp)
8274 {
8275 }
8276 #endif
8277
8278 static bool bnxt_eee_config_ok(struct bnxt *bp)
8279 {
8280 struct ethtool_eee *eee = &bp->eee;
8281 struct bnxt_link_info *link_info = &bp->link_info;
8282
8283 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8284 return true;
8285
8286 if (eee->eee_enabled) {
8287 u32 advertising =
8288 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8289
8290 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8291 eee->eee_enabled = 0;
8292 return false;
8293 }
8294 if (eee->advertised & ~advertising) {
8295 eee->advertised = advertising & eee->supported;
8296 return false;
8297 }
8298 }
8299 return true;
8300 }
8301
8302 static int bnxt_update_phy_setting(struct bnxt *bp)
8303 {
8304 int rc;
8305 bool update_link = false;
8306 bool update_pause = false;
8307 bool update_eee = false;
8308 struct bnxt_link_info *link_info = &bp->link_info;
8309
8310 rc = bnxt_update_link(bp, true);
8311 if (rc) {
8312 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8313 rc);
8314 return rc;
8315 }
8316 if (!BNXT_SINGLE_PF(bp))
8317 return 0;
8318
8319 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8320 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8321 link_info->req_flow_ctrl)
8322 update_pause = true;
8323 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8324 link_info->force_pause_setting != link_info->req_flow_ctrl)
8325 update_pause = true;
8326 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8327 if (BNXT_AUTO_MODE(link_info->auto_mode))
8328 update_link = true;
8329 if (link_info->req_link_speed != link_info->force_link_speed)
8330 update_link = true;
8331 if (link_info->req_duplex != link_info->duplex_setting)
8332 update_link = true;
8333 } else {
8334 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8335 update_link = true;
8336 if (link_info->advertising != link_info->auto_link_speeds)
8337 update_link = true;
8338 }
8339
8340 	/* The last close may have shut down the link, so we need to call
8341 * PHY_CFG to bring it back up.
8342 */
8343 if (!netif_carrier_ok(bp->dev))
8344 update_link = true;
8345
8346 if (!bnxt_eee_config_ok(bp))
8347 update_eee = true;
8348
8349 if (update_link)
8350 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
8351 else if (update_pause)
8352 rc = bnxt_hwrm_set_pause(bp);
8353 if (rc) {
8354 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8355 rc);
8356 return rc;
8357 }
8358
8359 return rc;
8360 }
8361
8362 /* Common routine to pre-map certain register blocks to different GRC windows.
8363  * A PF has 16 4K windows and a VF has 4 4K windows; however, only 15 windows
8364  * in the PF and 3 windows in the VF can be customized to map different
8365  * register blocks.
8366 */
8367 static void bnxt_preset_reg_win(struct bnxt *bp)
8368 {
8369 if (BNXT_PF(bp)) {
8370 /* CAG registers map to GRC window #4 */
8371 writel(BNXT_CAG_REG_BASE,
8372 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8373 }
8374 }
8375
8376 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8377
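/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC in firmware, optionally re-apply PHY settings, then
 * enable interrupts and the TX queues.  On failure, everything allocated so
 * far is torn down via the open_err* labels.
 */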
8378 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8379 {
8380 int rc = 0;
8381
8382 bnxt_preset_reg_win(bp);
8383 netif_carrier_off(bp->dev);
8384 if (irq_re_init) {
8385 /* Reserve rings now if none were reserved at driver probe. */
8386 rc = bnxt_init_dflt_ring_mode(bp);
8387 if (rc) {
8388 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8389 return rc;
8390 }
8391 }
8392 rc = bnxt_reserve_rings(bp);
8393 if (rc)
8394 return rc;
8395 if ((bp->flags & BNXT_FLAG_RFS) &&
8396 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8397 /* disable RFS if falling back to INTA */
8398 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8399 bp->flags &= ~BNXT_FLAG_RFS;
8400 }
8401
8402 rc = bnxt_alloc_mem(bp, irq_re_init);
8403 if (rc) {
8404 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8405 goto open_err_free_mem;
8406 }
8407
8408 if (irq_re_init) {
8409 bnxt_init_napi(bp);
8410 rc = bnxt_request_irq(bp);
8411 if (rc) {
8412 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
8413 goto open_err_irq;
8414 }
8415 }
8416
8417 bnxt_enable_napi(bp);
8418 bnxt_debug_dev_init(bp);
8419
8420 rc = bnxt_init_nic(bp, irq_re_init);
8421 if (rc) {
8422 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8423 goto open_err;
8424 }
8425
8426 if (link_re_init) {
8427 mutex_lock(&bp->link_lock);
8428 rc = bnxt_update_phy_setting(bp);
8429 mutex_unlock(&bp->link_lock);
8430 if (rc) {
8431 netdev_warn(bp->dev, "failed to update phy settings\n");
8432 if (BNXT_SINGLE_PF(bp)) {
8433 bp->link_info.phy_retry = true;
8434 bp->link_info.phy_retry_expires =
8435 jiffies + 5 * HZ;
8436 }
8437 }
8438 }
8439
8440 if (irq_re_init)
8441 udp_tunnel_get_rx_info(bp->dev);
8442
8443 set_bit(BNXT_STATE_OPEN, &bp->state);
8444 bnxt_enable_int(bp);
8445 /* Enable TX queues */
8446 bnxt_tx_enable(bp);
8447 mod_timer(&bp->timer, jiffies + bp->current_interval);
8448 	/* Poll link status and check SFP+ module status */
8449 bnxt_get_port_module_status(bp);
8450
8451 /* VF-reps may need to be re-opened after the PF is re-opened */
8452 if (BNXT_PF(bp))
8453 bnxt_vf_reps_open(bp);
8454 return 0;
8455
8456 open_err:
8457 bnxt_debug_dev_exit(bp);
8458 bnxt_disable_napi(bp);
8459
8460 open_err_irq:
8461 bnxt_del_napi(bp);
8462
8463 open_err_free_mem:
8464 bnxt_free_skbs(bp);
8465 bnxt_free_irq(bp);
8466 bnxt_free_mem(bp, true);
8467 return rc;
8468 }
8469
8470 /* rtnl_lock held */
8471 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8472 {
8473 int rc = 0;
8474
8475 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8476 if (rc) {
8477 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8478 dev_close(bp->dev);
8479 }
8480 return rc;
8481 }
8482
8483 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
8484  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
8485  * self-tests.
8486 */
8487 int bnxt_half_open_nic(struct bnxt *bp)
8488 {
8489 int rc = 0;
8490
8491 rc = bnxt_alloc_mem(bp, false);
8492 if (rc) {
8493 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8494 goto half_open_err;
8495 }
8496 rc = bnxt_init_nic(bp, false);
8497 if (rc) {
8498 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8499 goto half_open_err;
8500 }
8501 return 0;
8502
8503 half_open_err:
8504 bnxt_free_skbs(bp);
8505 bnxt_free_mem(bp, false);
8506 dev_close(bp->dev);
8507 return rc;
8508 }
8509
8510 /* rtnl_lock held, this call can only be made after a previous successful
8511 * call to bnxt_half_open_nic().
8512 */
8513 void bnxt_half_close_nic(struct bnxt *bp)
8514 {
8515 bnxt_hwrm_resource_free(bp, false, false);
8516 bnxt_free_skbs(bp);
8517 bnxt_free_mem(bp, false);
8518 }
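
/* Illustrative pairing (sketch only; the ethtool offline self-test path is
 * the expected caller, under rtnl_lock):
 *
 *	if (!bnxt_half_open_nic(bp)) {
 *		... run offline loopback tests ...
 *		bnxt_half_close_nic(bp);
 *	}
 */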
8519
8520 static int bnxt_open(struct net_device *dev)
8521 {
8522 struct bnxt *bp = netdev_priv(dev);
8523 int rc;
8524
8525 bnxt_hwrm_if_change(bp, true);
8526 rc = __bnxt_open_nic(bp, true, true);
8527 if (rc)
8528 bnxt_hwrm_if_change(bp, false);
8529
8530 bnxt_hwmon_open(bp);
8531
8532 return rc;
8533 }
8534
8535 static bool bnxt_drv_busy(struct bnxt *bp)
8536 {
8537 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8538 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8539 }
8540
8541 static void bnxt_get_ring_stats(struct bnxt *bp,
8542 struct rtnl_link_stats64 *stats);
8543
8544 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8545 bool link_re_init)
8546 {
8547 /* Close the VF-reps before closing PF */
8548 if (BNXT_PF(bp))
8549 bnxt_vf_reps_close(bp);
8550
8551 	/* Change the device state to avoid TX queue wake-ups */
8552 bnxt_tx_disable(bp);
8553
8554 clear_bit(BNXT_STATE_OPEN, &bp->state);
8555 smp_mb__after_atomic();
8556 while (bnxt_drv_busy(bp))
8557 msleep(20);
8558
8559 	/* Flush rings and disable interrupts */
8560 bnxt_shutdown_nic(bp, irq_re_init);
8561
8562 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8563
8564 bnxt_debug_dev_exit(bp);
8565 bnxt_disable_napi(bp);
8566 del_timer_sync(&bp->timer);
8567 bnxt_free_skbs(bp);
8568
8569 /* Save ring stats before shutdown */
8570 if (bp->bnapi)
8571 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
8572 if (irq_re_init) {
8573 bnxt_free_irq(bp);
8574 bnxt_del_napi(bp);
8575 }
8576 bnxt_free_mem(bp, irq_re_init);
8577 }
8578
8579 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8580 {
8581 int rc = 0;
8582
8583 #ifdef CONFIG_BNXT_SRIOV
8584 if (bp->sriov_cfg) {
8585 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8586 !bp->sriov_cfg,
8587 BNXT_SRIOV_CFG_WAIT_TMO);
8588 if (rc)
8589 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8590 }
8591 #endif
8592 __bnxt_close_nic(bp, irq_re_init, link_re_init);
8593 return rc;
8594 }
8595
8596 static int bnxt_close(struct net_device *dev)
8597 {
8598 struct bnxt *bp = netdev_priv(dev);
8599
8600 bnxt_hwmon_close(bp);
8601 bnxt_close_nic(bp, true, true);
8602 bnxt_hwrm_shutdown_link(bp);
8603 bnxt_hwrm_if_change(bp, false);
8604 return 0;
8605 }
8606
8607 /* rtnl_lock held */
8608 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8609 {
8610 switch (cmd) {
8611 case SIOCGMIIPHY:
8612 /* fallthru */
8613 case SIOCGMIIREG: {
8614 if (!netif_running(dev))
8615 return -EAGAIN;
8616
8617 return 0;
8618 }
8619
8620 case SIOCSMIIREG:
8621 if (!netif_running(dev))
8622 return -EAGAIN;
8623
8624 return 0;
8625
8626 default:
8627 /* do nothing */
8628 break;
8629 }
8630 return -EOPNOTSUPP;
8631 }
8632
8633 static void bnxt_get_ring_stats(struct bnxt *bp,
8634 struct rtnl_link_stats64 *stats)
8635 {
8636 int i;
8637
8638
8639 for (i = 0; i < bp->cp_nr_rings; i++) {
8640 struct bnxt_napi *bnapi = bp->bnapi[i];
8641 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8642 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8643
8644 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8645 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8646 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8647
8648 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8649 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8650 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8651
8652 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8653 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8654 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8655
8656 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8657 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8658 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8659
8660 stats->rx_missed_errors +=
8661 le64_to_cpu(hw_stats->rx_discard_pkts);
8662
8663 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8664
8665 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8666 }
8667 }
8668
8669 static void bnxt_add_prev_stats(struct bnxt *bp,
8670 struct rtnl_link_stats64 *stats)
8671 {
8672 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8673
8674 stats->rx_packets += prev_stats->rx_packets;
8675 stats->tx_packets += prev_stats->tx_packets;
8676 stats->rx_bytes += prev_stats->rx_bytes;
8677 stats->tx_bytes += prev_stats->tx_bytes;
8678 stats->rx_missed_errors += prev_stats->rx_missed_errors;
8679 stats->multicast += prev_stats->multicast;
8680 stats->tx_dropped += prev_stats->tx_dropped;
8681 }
8682
8683 static void
8684 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8685 {
8686 struct bnxt *bp = netdev_priv(dev);
8687
8688 set_bit(BNXT_STATE_READ_STATS, &bp->state);
8689 /* Make sure bnxt_close_nic() sees that we are reading stats before
8690 * we check the BNXT_STATE_OPEN flag.
8691 */
8692 smp_mb__after_atomic();
8693 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8694 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8695 *stats = bp->net_stats_prev;
8696 return;
8697 }
8698
8699 bnxt_get_ring_stats(bp, stats);
8700 bnxt_add_prev_stats(bp, stats);
8701
8702 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8703 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8704 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8705
8706 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8707 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8708 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8709 le64_to_cpu(rx->rx_ovrsz_frames) +
8710 le64_to_cpu(rx->rx_runt_frames);
8711 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8712 le64_to_cpu(rx->rx_jbr_frames);
8713 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8714 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8715 stats->tx_errors = le64_to_cpu(tx->tx_err);
8716 }
8717 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8718 }
8719
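/* Copy the netdev multicast list into the default VNIC's mc_list, falling
 * back to ALL_MCAST if the list exceeds BNXT_MAX_MC_ADDRS.  Returns true if
 * the cached list changed.
 */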
8720 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8721 {
8722 struct net_device *dev = bp->dev;
8723 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8724 struct netdev_hw_addr *ha;
8725 u8 *haddr;
8726 int mc_count = 0;
8727 bool update = false;
8728 int off = 0;
8729
8730 netdev_for_each_mc_addr(ha, dev) {
8731 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8732 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8733 vnic->mc_list_count = 0;
8734 return false;
8735 }
8736 haddr = ha->addr;
8737 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8738 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8739 update = true;
8740 }
8741 off += ETH_ALEN;
8742 mc_count++;
8743 }
8744 if (mc_count)
8745 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8746
8747 if (mc_count != vnic->mc_list_count) {
8748 vnic->mc_list_count = mc_count;
8749 update = true;
8750 }
8751 return update;
8752 }
8753
8754 static bool bnxt_uc_list_updated(struct bnxt *bp)
8755 {
8756 struct net_device *dev = bp->dev;
8757 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8758 struct netdev_hw_addr *ha;
8759 int off = 0;
8760
8761 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8762 return true;
8763
8764 netdev_for_each_uc_addr(ha, dev) {
8765 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8766 return true;
8767
8768 off += ETH_ALEN;
8769 }
8770 return false;
8771 }
8772
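/* ndo_set_rx_mode handler.  It only recomputes the VNIC RX mask and defers
 * the actual HWRM updates to bnxt_sp_task() via BNXT_RX_MASK_SP_EVENT.
 */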
8773 static void bnxt_set_rx_mode(struct net_device *dev)
8774 {
8775 struct bnxt *bp = netdev_priv(dev);
8776 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8777 u32 mask = vnic->rx_mask;
8778 bool mc_update = false;
8779 bool uc_update;
8780
8781 if (!netif_running(dev))
8782 return;
8783
8784 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8785 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
8786 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8787 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
8788
8789 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8790 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8791
8792 uc_update = bnxt_uc_list_updated(bp);
8793
8794 if (dev->flags & IFF_BROADCAST)
8795 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8796 if (dev->flags & IFF_ALLMULTI) {
8797 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8798 vnic->mc_list_count = 0;
8799 } else {
8800 mc_update = bnxt_mc_list_updated(bp, &mask);
8801 }
8802
8803 if (mask != vnic->rx_mask || uc_update || mc_update) {
8804 vnic->rx_mask = mask;
8805
8806 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
8807 bnxt_queue_sp_work(bp);
8808 }
8809 }
8810
8811 static int bnxt_cfg_rx_mode(struct bnxt *bp)
8812 {
8813 struct net_device *dev = bp->dev;
8814 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8815 struct netdev_hw_addr *ha;
8816 int i, off = 0, rc;
8817 bool uc_update;
8818
8819 netif_addr_lock_bh(dev);
8820 uc_update = bnxt_uc_list_updated(bp);
8821 netif_addr_unlock_bh(dev);
8822
8823 if (!uc_update)
8824 goto skip_uc;
8825
8826 mutex_lock(&bp->hwrm_cmd_lock);
8827 for (i = 1; i < vnic->uc_filter_count; i++) {
8828 struct hwrm_cfa_l2_filter_free_input req = {0};
8829
8830 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8831 -1);
8832
8833 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8834
8835 rc = _hwrm_send_message(bp, &req, sizeof(req),
8836 HWRM_CMD_TIMEOUT);
8837 }
8838 mutex_unlock(&bp->hwrm_cmd_lock);
8839
8840 vnic->uc_filter_count = 1;
8841
8842 netif_addr_lock_bh(dev);
8843 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8844 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8845 } else {
8846 netdev_for_each_uc_addr(ha, dev) {
8847 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8848 off += ETH_ALEN;
8849 vnic->uc_filter_count++;
8850 }
8851 }
8852 netif_addr_unlock_bh(dev);
8853
8854 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8855 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8856 if (rc) {
8857 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8858 rc);
8859 vnic->uc_filter_count = i;
8860 return rc;
8861 }
8862 }
8863
8864 skip_uc:
8865 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8866 if (rc)
8867 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8868 rc);
8869
8870 return rc;
8871 }
8872
8873 static bool bnxt_can_reserve_rings(struct bnxt *bp)
8874 {
8875 #ifdef CONFIG_BNXT_SRIOV
8876 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
8877 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8878
8879 		/* If no minimum rings were provisioned by the PF, don't
8880 		 * reserve rings by default when the device is down.
8881 */
8882 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8883 return true;
8884
8885 if (!netif_running(bp->dev))
8886 return false;
8887 }
8888 #endif
8889 return true;
8890 }
8891
8892 /* If the chip and firmware support RFS */
8893 static bool bnxt_rfs_supported(struct bnxt *bp)
8894 {
8895 if (bp->flags & BNXT_FLAG_CHIP_P5)
8896 return false;
8897 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8898 return true;
8899 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8900 return true;
8901 return false;
8902 }
8903
8904 /* If runtime conditions support RFS */
8905 static bool bnxt_rfs_capable(struct bnxt *bp)
8906 {
8907 #ifdef CONFIG_RFS_ACCEL
8908 int vnics, max_vnics, max_rss_ctxs;
8909
8910 if (bp->flags & BNXT_FLAG_CHIP_P5)
8911 return false;
8912 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
8913 return false;
8914
8915 vnics = 1 + bp->rx_nr_rings;
8916 max_vnics = bnxt_get_max_func_vnics(bp);
8917 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
8918
8919 /* RSS contexts not a limiting factor */
8920 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8921 max_rss_ctxs = max_vnics;
8922 if (vnics > max_vnics || vnics > max_rss_ctxs) {
8923 if (bp->rx_nr_rings > 1)
8924 netdev_warn(bp->dev,
8925 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8926 min(max_rss_ctxs - 1, max_vnics - 1));
8927 return false;
8928 }
8929
8930 if (!BNXT_NEW_RM(bp))
8931 return true;
8932
8933 if (vnics == bp->hw_resc.resv_vnics)
8934 return true;
8935
8936 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
8937 if (vnics <= bp->hw_resc.resv_vnics)
8938 return true;
8939
8940 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
8941 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
8942 return false;
8943 #else
8944 return false;
8945 #endif
8946 }
8947
8948 static netdev_features_t bnxt_fix_features(struct net_device *dev,
8949 netdev_features_t features)
8950 {
8951 struct bnxt *bp = netdev_priv(dev);
8952
8953 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
8954 features &= ~NETIF_F_NTUPLE;
8955
8956 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8957 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8958
8959 if (!(features & NETIF_F_GRO))
8960 features &= ~NETIF_F_GRO_HW;
8961
8962 if (features & NETIF_F_GRO_HW)
8963 features &= ~NETIF_F_LRO;
8964
8965 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
8966 * turned on or off together.
8967 */
8968 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8969 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8970 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8971 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8972 NETIF_F_HW_VLAN_STAG_RX);
8973 else
8974 features |= NETIF_F_HW_VLAN_CTAG_RX |
8975 NETIF_F_HW_VLAN_STAG_RX;
8976 }
8977 #ifdef CONFIG_BNXT_SRIOV
8978 if (BNXT_VF(bp)) {
8979 if (bp->vf.vlan) {
8980 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8981 NETIF_F_HW_VLAN_STAG_RX);
8982 }
8983 }
8984 #endif
8985 return features;
8986 }
8987
8988 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8989 {
8990 struct bnxt *bp = netdev_priv(dev);
8991 u32 flags = bp->flags;
8992 u32 changes;
8993 int rc = 0;
8994 bool re_init = false;
8995 bool update_tpa = false;
8996
8997 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
8998 if (features & NETIF_F_GRO_HW)
8999 flags |= BNXT_FLAG_GRO;
9000 else if (features & NETIF_F_LRO)
9001 flags |= BNXT_FLAG_LRO;
9002
9003 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9004 flags &= ~BNXT_FLAG_TPA;
9005
9006 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9007 flags |= BNXT_FLAG_STRIP_VLAN;
9008
9009 if (features & NETIF_F_NTUPLE)
9010 flags |= BNXT_FLAG_RFS;
9011
9012 changes = flags ^ bp->flags;
9013 if (changes & BNXT_FLAG_TPA) {
9014 update_tpa = true;
9015 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9016 (flags & BNXT_FLAG_TPA) == 0)
9017 re_init = true;
9018 }
9019
9020 if (changes & ~BNXT_FLAG_TPA)
9021 re_init = true;
9022
9023 if (flags != bp->flags) {
9024 u32 old_flags = bp->flags;
9025
9026 bp->flags = flags;
9027
9028 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9029 if (update_tpa)
9030 bnxt_set_ring_params(bp);
9031 return rc;
9032 }
9033
9034 if (re_init) {
9035 bnxt_close_nic(bp, false, false);
9036 if (update_tpa)
9037 bnxt_set_ring_params(bp);
9038
9039 return bnxt_open_nic(bp, false, false);
9040 }
9041 if (update_tpa) {
9042 rc = bnxt_set_tpa(bp,
9043 (flags & BNXT_FLAG_TPA) ?
9044 true : false);
9045 if (rc)
9046 bp->flags = old_flags;
9047 }
9048 }
9049 return rc;
9050 }
9051
9052 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9053 u32 ring_id, u32 *prod, u32 *cons)
9054 {
9055 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9056 struct hwrm_dbg_ring_info_get_input req = {0};
9057 int rc;
9058
9059 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9060 req.ring_type = ring_type;
9061 req.fw_ring_id = cpu_to_le32(ring_id);
9062 mutex_lock(&bp->hwrm_cmd_lock);
9063 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9064 if (!rc) {
9065 *prod = le32_to_cpu(resp->producer_index);
9066 *cons = le32_to_cpu(resp->consumer_index);
9067 }
9068 mutex_unlock(&bp->hwrm_cmd_lock);
9069 return rc;
9070 }
9071
9072 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9073 {
9074 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9075 int i = bnapi->index;
9076
9077 if (!txr)
9078 return;
9079
9080 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9081 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9082 txr->tx_cons);
9083 }
9084
9085 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9086 {
9087 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9088 int i = bnapi->index;
9089
9090 if (!rxr)
9091 return;
9092
9093 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9094 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9095 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9096 rxr->rx_sw_agg_prod);
9097 }
9098
9099 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9100 {
9101 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9102 int i = bnapi->index;
9103
9104 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9105 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9106 }
9107
9108 static void bnxt_dbg_dump_states(struct bnxt *bp)
9109 {
9110 int i;
9111 struct bnxt_napi *bnapi;
9112
9113 for (i = 0; i < bp->cp_nr_rings; i++) {
9114 bnapi = bp->bnapi[i];
9115 if (netif_msg_drv(bp)) {
9116 bnxt_dump_tx_sw_state(bnapi);
9117 bnxt_dump_rx_sw_state(bnapi);
9118 bnxt_dump_cp_sw_state(bnapi);
9119 }
9120 }
9121 }
9122
9123 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9124 {
9125 if (!silent)
9126 bnxt_dbg_dump_states(bp);
9127 if (netif_running(bp->dev)) {
9128 int rc;
9129
9130 if (!silent)
9131 bnxt_ulp_stop(bp);
9132 bnxt_close_nic(bp, false, false);
9133 rc = bnxt_open_nic(bp, false, false);
9134 if (!silent && !rc)
9135 bnxt_ulp_start(bp);
9136 }
9137 }
9138
9139 static void bnxt_tx_timeout(struct net_device *dev)
9140 {
9141 struct bnxt *bp = netdev_priv(dev);
9142
9143 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9144 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9145 bnxt_queue_sp_work(bp);
9146 }
9147
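/* Periodic maintenance timer.  It never issues HWRM commands directly;
 * instead it sets the relevant sp_event bits (port stats, TC flower stats,
 * PHY retry, missed-IRQ check), kicks bnxt_sp_task(), and rearms itself.
 */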
9148 static void bnxt_timer(struct timer_list *t)
9149 {
9150 struct bnxt *bp = from_timer(bp, t, timer);
9151 struct net_device *dev = bp->dev;
9152
9153 if (!netif_running(dev))
9154 return;
9155
9156 if (atomic_read(&bp->intr_sem) != 0)
9157 goto bnxt_restart_timer;
9158
9159 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9160 bp->stats_coal_ticks) {
9161 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9162 bnxt_queue_sp_work(bp);
9163 }
9164
9165 if (bnxt_tc_flower_enabled(bp)) {
9166 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9167 bnxt_queue_sp_work(bp);
9168 }
9169
9170 if (bp->link_info.phy_retry) {
9171 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9172 bp->link_info.phy_retry = 0;
9173 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9174 } else {
9175 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9176 bnxt_queue_sp_work(bp);
9177 }
9178 }
9179
9180 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9181 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9182 bnxt_queue_sp_work(bp);
9183 }
9184 bnxt_restart_timer:
9185 mod_timer(&bp->timer, jiffies + bp->current_interval);
9186 }
9187
9188 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
9189 {
9190 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9191 	 * set.  If the device is being closed, bnxt_close() may be holding
9192 	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
9193 	 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
9194 */
9195 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9196 rtnl_lock();
9197 }
9198
9199 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9200 {
9201 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9202 rtnl_unlock();
9203 }
9204
9205 /* Only called from bnxt_sp_task() */
9206 static void bnxt_reset(struct bnxt *bp, bool silent)
9207 {
9208 bnxt_rtnl_lock_sp(bp);
9209 if (test_bit(BNXT_STATE_OPEN, &bp->state))
9210 bnxt_reset_task(bp, silent);
9211 bnxt_rtnl_unlock_sp(bp);
9212 }
9213
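/* P5 chips only: if a completion ring has work pending but its consumer
 * index has not advanced since the last check, query the ring state from
 * firmware and account a missed interrupt.
 */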
9214 static void bnxt_chk_missed_irq(struct bnxt *bp)
9215 {
9216 int i;
9217
9218 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9219 return;
9220
9221 for (i = 0; i < bp->cp_nr_rings; i++) {
9222 struct bnxt_napi *bnapi = bp->bnapi[i];
9223 struct bnxt_cp_ring_info *cpr;
9224 u32 fw_ring_id;
9225 int j;
9226
9227 if (!bnapi)
9228 continue;
9229
9230 cpr = &bnapi->cp_ring;
9231 for (j = 0; j < 2; j++) {
9232 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9233 u32 val[2];
9234
9235 if (!cpr2 || cpr2->has_more_work ||
9236 !bnxt_has_work(bp, cpr2))
9237 continue;
9238
9239 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9240 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9241 continue;
9242 }
9243 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9244 bnxt_dbg_hwrm_ring_info_get(bp,
9245 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9246 fw_ring_id, &val[0], &val[1]);
9247 cpr->missed_irqs++;
9248 }
9249 }
9250 }
9251
9252 static void bnxt_cfg_ntp_filters(struct bnxt *);
9253
9254 static void bnxt_sp_task(struct work_struct *work)
9255 {
9256 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
9257
9258 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9259 smp_mb__after_atomic();
9260 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9261 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9262 return;
9263 }
9264
9265 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9266 bnxt_cfg_rx_mode(bp);
9267
9268 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9269 bnxt_cfg_ntp_filters(bp);
9270 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9271 bnxt_hwrm_exec_fwd_req(bp);
9272 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9273 bnxt_hwrm_tunnel_dst_port_alloc(
9274 bp, bp->vxlan_port,
9275 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9276 }
9277 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9278 bnxt_hwrm_tunnel_dst_port_free(
9279 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9280 }
9281 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9282 bnxt_hwrm_tunnel_dst_port_alloc(
9283 bp, bp->nge_port,
9284 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9285 }
9286 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9287 bnxt_hwrm_tunnel_dst_port_free(
9288 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9289 }
9290 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
9291 bnxt_hwrm_port_qstats(bp);
9292 bnxt_hwrm_port_qstats_ext(bp);
9293 }
9294
9295 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
9296 int rc;
9297
9298 mutex_lock(&bp->link_lock);
9299 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9300 &bp->sp_event))
9301 bnxt_hwrm_phy_qcaps(bp);
9302
9303 rc = bnxt_update_link(bp, true);
9304 mutex_unlock(&bp->link_lock);
9305 if (rc)
9306 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9307 rc);
9308 }
9309 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9310 int rc;
9311
9312 mutex_lock(&bp->link_lock);
9313 rc = bnxt_update_phy_setting(bp);
9314 mutex_unlock(&bp->link_lock);
9315 if (rc) {
9316 netdev_warn(bp->dev, "update phy settings retry failed\n");
9317 } else {
9318 bp->link_info.phy_retry = false;
9319 netdev_info(bp->dev, "update phy settings retry succeeded\n");
9320 }
9321 }
9322 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
9323 mutex_lock(&bp->link_lock);
9324 bnxt_get_port_module_status(bp);
9325 mutex_unlock(&bp->link_lock);
9326 }
9327
9328 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9329 bnxt_tc_flow_stats_work(bp);
9330
9331 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9332 bnxt_chk_missed_irq(bp);
9333
9334 	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
9335 	 * must be the last functions called before exiting.
9336 */
9337 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9338 bnxt_reset(bp, false);
9339
9340 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9341 bnxt_reset(bp, true);
9342
9343 smp_mb__before_atomic();
9344 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9345 }
9346
9347 /* Under rtnl_lock */
9348 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9349 int tx_xdp)
9350 {
9351 int max_rx, max_tx, tx_sets = 1;
9352 int tx_rings_needed, stats;
9353 int rx_rings = rx;
9354 int cp, vnics, rc;
9355
9356 if (tcs)
9357 tx_sets = tcs;
9358
9359 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9360 if (rc)
9361 return rc;
9362
9363 if (max_rx < rx)
9364 return -ENOMEM;
9365
9366 tx_rings_needed = tx * tx_sets + tx_xdp;
9367 if (max_tx < tx_rings_needed)
9368 return -ENOMEM;
9369
9370 vnics = 1;
9371 if (bp->flags & BNXT_FLAG_RFS)
9372 vnics += rx_rings;
9373
9374 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9375 rx_rings <<= 1;
9376 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
9377 stats = cp;
9378 if (BNXT_NEW_RM(bp)) {
9379 cp += bnxt_get_ulp_msix_num(bp);
9380 stats += bnxt_get_ulp_stat_ctxs(bp);
9381 }
9382 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9383 stats, vnics);
9384 }
9385
9386 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9387 {
9388 if (bp->bar2) {
9389 pci_iounmap(pdev, bp->bar2);
9390 bp->bar2 = NULL;
9391 }
9392
9393 if (bp->bar1) {
9394 pci_iounmap(pdev, bp->bar1);
9395 bp->bar1 = NULL;
9396 }
9397
9398 if (bp->bar0) {
9399 pci_iounmap(pdev, bp->bar0);
9400 bp->bar0 = NULL;
9401 }
9402 }
9403
9404 static void bnxt_cleanup_pci(struct bnxt *bp)
9405 {
9406 bnxt_unmap_bars(bp, bp->pdev);
9407 pci_release_regions(bp->pdev);
9408 pci_disable_device(bp->pdev);
9409 }
9410
9411 static void bnxt_init_dflt_coal(struct bnxt *bp)
9412 {
9413 struct bnxt_coal *coal;
9414
9415 	/* Tick values in microseconds.
9416 * 1 coal_buf x bufs_per_record = 1 completion record.
9417 */
9418 coal = &bp->rx_coal;
9419 coal->coal_ticks = 10;
9420 coal->coal_bufs = 30;
9421 coal->coal_ticks_irq = 1;
9422 coal->coal_bufs_irq = 2;
9423 coal->idle_thresh = 50;
9424 coal->bufs_per_record = 2;
9425 coal->budget = 64; /* NAPI budget */
9426
9427 coal = &bp->tx_coal;
9428 coal->coal_ticks = 28;
9429 coal->coal_bufs = 30;
9430 coal->coal_ticks_irq = 2;
9431 coal->coal_bufs_irq = 2;
9432 coal->bufs_per_record = 1;
9433
9434 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9435 }
9436
9437 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9438 {
9439 int rc;
9440 struct bnxt *bp = netdev_priv(dev);
9441
9442 SET_NETDEV_DEV(dev, &pdev->dev);
9443
9444 /* enable device (incl. PCI PM wakeup), and bus-mastering */
9445 rc = pci_enable_device(pdev);
9446 if (rc) {
9447 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9448 goto init_err;
9449 }
9450
9451 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9452 dev_err(&pdev->dev,
9453 "Cannot find PCI device base address, aborting\n");
9454 rc = -ENODEV;
9455 goto init_err_disable;
9456 }
9457
9458 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9459 if (rc) {
9460 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9461 goto init_err_disable;
9462 }
9463
9464 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9465 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9466 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
9467 goto init_err_disable;
9468 }
9469
9470 pci_set_master(pdev);
9471
9472 bp->dev = dev;
9473 bp->pdev = pdev;
9474
9475 bp->bar0 = pci_ioremap_bar(pdev, 0);
9476 if (!bp->bar0) {
9477 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9478 rc = -ENOMEM;
9479 goto init_err_release;
9480 }
9481
9482 bp->bar1 = pci_ioremap_bar(pdev, 2);
9483 if (!bp->bar1) {
9484 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9485 rc = -ENOMEM;
9486 goto init_err_release;
9487 }
9488
9489 bp->bar2 = pci_ioremap_bar(pdev, 4);
9490 if (!bp->bar2) {
9491 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9492 rc = -ENOMEM;
9493 goto init_err_release;
9494 }
9495
9496 pci_enable_pcie_error_reporting(pdev);
9497
9498 INIT_WORK(&bp->sp_task, bnxt_sp_task);
9499
9500 spin_lock_init(&bp->ntp_fltr_lock);
9501 #if BITS_PER_LONG == 32
9502 spin_lock_init(&bp->db_lock);
9503 #endif
9504
9505 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9506 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9507
9508 bnxt_init_dflt_coal(bp);
9509
9510 timer_setup(&bp->timer, bnxt_timer, 0);
9511 bp->current_interval = BNXT_TIMER_INTERVAL;
9512
9513 clear_bit(BNXT_STATE_OPEN, &bp->state);
9514 return 0;
9515
9516 init_err_release:
9517 bnxt_unmap_bars(bp, pdev);
9518 pci_release_regions(pdev);
9519
9520 init_err_disable:
9521 pci_disable_device(pdev);
9522
9523 init_err:
9524 return rc;
9525 }
9526
9527 /* rtnl_lock held */
9528 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9529 {
9530 struct sockaddr *addr = p;
9531 struct bnxt *bp = netdev_priv(dev);
9532 int rc = 0;
9533
9534 if (!is_valid_ether_addr(addr->sa_data))
9535 return -EADDRNOTAVAIL;
9536
9537 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9538 return 0;
9539
9540 rc = bnxt_approve_mac(bp, addr->sa_data, true);
9541 if (rc)
9542 return rc;
9543
9544 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9545 if (netif_running(dev)) {
9546 bnxt_close_nic(bp, false, false);
9547 rc = bnxt_open_nic(bp, false, false);
9548 }
9549
9550 return rc;
9551 }
9552
9553 /* rtnl_lock held */
9554 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9555 {
9556 struct bnxt *bp = netdev_priv(dev);
9557
9558 if (netif_running(dev))
9559 bnxt_close_nic(bp, false, false);
9560
9561 dev->mtu = new_mtu;
9562 bnxt_set_ring_params(bp);
9563
9564 if (netif_running(dev))
9565 return bnxt_open_nic(bp, false, false);
9566
9567 return 0;
9568 }
9569
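/* Configure the number of traffic classes (mqprio).  Ring availability
 * is checked first; if the device is running it is closed, the TX ring
 * count is rescaled to tx_nr_rings_per_tc * tc plus any XDP rings, and
 * the device is reopened with the recomputed completion ring count.
 */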
9570 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
9571 {
9572 struct bnxt *bp = netdev_priv(dev);
9573 bool sh = false;
9574 int rc;
9575
9576 if (tc > bp->max_tc) {
9577 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
9578 tc, bp->max_tc);
9579 return -EINVAL;
9580 }
9581
9582 if (netdev_get_num_tc(dev) == tc)
9583 return 0;
9584
9585 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9586 sh = true;
9587
9588 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9589 sh, tc, bp->tx_nr_rings_xdp);
9590 if (rc)
9591 return rc;
9592
9593 /* Needs to close the device and do hw resource re-allocations */
9594 if (netif_running(bp->dev))
9595 bnxt_close_nic(bp, true, false);
9596
9597 if (tc) {
9598 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9599 netdev_set_num_tc(dev, tc);
9600 } else {
9601 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9602 netdev_reset_tc(dev);
9603 }
9604 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
9605 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9606 bp->tx_nr_rings + bp->rx_nr_rings;
9607
9608 if (netif_running(bp->dev))
9609 return bnxt_open_nic(bp, true, false);
9610
9611 return 0;
9612 }
9613
9614 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9615 void *cb_priv)
9616 {
9617 struct bnxt *bp = cb_priv;
9618
9619 if (!bnxt_tc_flower_enabled(bp) ||
9620 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
9621 return -EOPNOTSUPP;
9622
9623 switch (type) {
9624 case TC_SETUP_CLSFLOWER:
9625 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9626 default:
9627 return -EOPNOTSUPP;
9628 }
9629 }
9630
9631 static int bnxt_setup_tc_block(struct net_device *dev,
9632 struct tc_block_offload *f)
9633 {
9634 struct bnxt *bp = netdev_priv(dev);
9635
9636 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9637 return -EOPNOTSUPP;
9638
9639 switch (f->command) {
9640 case TC_BLOCK_BIND:
9641 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
9642 bp, bp, f->extack);
9643 case TC_BLOCK_UNBIND:
9644 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9645 return 0;
9646 default:
9647 return -EOPNOTSUPP;
9648 }
9649 }
9650
9651 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
9652 void *type_data)
9653 {
9654 switch (type) {
9655 case TC_SETUP_BLOCK:
9656 return bnxt_setup_tc_block(dev, type_data);
9657 case TC_SETUP_QDISC_MQPRIO: {
9658 struct tc_mqprio_qopt *mqprio = type_data;
9659
9660 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9661
9662 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9663 }
9664 default:
9665 return -EOPNOTSUPP;
9666 }
9667 }
9668
9669 #ifdef CONFIG_RFS_ACCEL
9670 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9671 struct bnxt_ntuple_filter *f2)
9672 {
9673 struct flow_keys *keys1 = &f1->fkeys;
9674 struct flow_keys *keys2 = &f2->fkeys;
9675
9676 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9677 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9678 keys1->ports.ports == keys2->ports.ports &&
9679 keys1->basic.ip_proto == keys2->basic.ip_proto &&
9680 keys1->basic.n_proto == keys2->basic.n_proto &&
9681 keys1->control.flags == keys2->control.flags &&
9682 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9683 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
9684 return true;
9685
9686 return false;
9687 }
9688
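/* aRFS callback (ndo_rx_flow_steer).  Dissect the flow keys from the
 * skb, look for an existing matching ntuple filter, and if none exists
 * allocate a new one with an ID from ntp_fltr_bmap and kick sp_task to
 * program the filter into hardware.  Only IPv4/IPv6 TCP and UDP flows
 * are supported.
 */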
9689 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9690 u16 rxq_index, u32 flow_id)
9691 {
9692 struct bnxt *bp = netdev_priv(dev);
9693 struct bnxt_ntuple_filter *fltr, *new_fltr;
9694 struct flow_keys *fkeys;
9695 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
9696 int rc = 0, idx, bit_id, l2_idx = 0;
9697 struct hlist_head *head;
9698
9699 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9700 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9701 int off = 0, j;
9702
9703 netif_addr_lock_bh(dev);
9704 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9705 if (ether_addr_equal(eth->h_dest,
9706 vnic->uc_list + off)) {
9707 l2_idx = j + 1;
9708 break;
9709 }
9710 }
9711 netif_addr_unlock_bh(dev);
9712 if (!l2_idx)
9713 return -EINVAL;
9714 }
9715 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9716 if (!new_fltr)
9717 return -ENOMEM;
9718
9719 fkeys = &new_fltr->fkeys;
9720 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9721 rc = -EPROTONOSUPPORT;
9722 goto err_free;
9723 }
9724
9725 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9726 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
9727 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9728 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9729 rc = -EPROTONOSUPPORT;
9730 goto err_free;
9731 }
9732 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9733 bp->hwrm_spec_code < 0x10601) {
9734 rc = -EPROTONOSUPPORT;
9735 goto err_free;
9736 }
9737 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9738 bp->hwrm_spec_code < 0x10601) {
9739 rc = -EPROTONOSUPPORT;
9740 goto err_free;
9741 }
9742
9743 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
9744 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9745
9746 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9747 head = &bp->ntp_fltr_hash_tbl[idx];
9748 rcu_read_lock();
9749 hlist_for_each_entry_rcu(fltr, head, hash) {
9750 if (bnxt_fltr_match(fltr, new_fltr)) {
9751 rcu_read_unlock();
9752 rc = 0;
9753 goto err_free;
9754 }
9755 }
9756 rcu_read_unlock();
9757
9758 spin_lock_bh(&bp->ntp_fltr_lock);
9759 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9760 BNXT_NTP_FLTR_MAX_FLTR, 0);
9761 if (bit_id < 0) {
9762 spin_unlock_bh(&bp->ntp_fltr_lock);
9763 rc = -ENOMEM;
9764 goto err_free;
9765 }
9766
9767 new_fltr->sw_id = (u16)bit_id;
9768 new_fltr->flow_id = flow_id;
9769 new_fltr->l2_fltr_idx = l2_idx;
9770 new_fltr->rxq = rxq_index;
9771 hlist_add_head_rcu(&new_fltr->hash, head);
9772 bp->ntp_fltr_count++;
9773 spin_unlock_bh(&bp->ntp_fltr_lock);
9774
9775 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
9776 bnxt_queue_sp_work(bp);
9777
9778 return new_fltr->sw_id;
9779
9780 err_free:
9781 kfree(new_fltr);
9782 return rc;
9783 }
9784
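/* Runs from sp_task to reconcile the ntuple filter table: filters that
 * RPS reports as expired are freed in firmware and removed from the
 * table, while filters added by bnxt_rx_flow_steer() that are not yet
 * valid are programmed via HWRM.
 */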
9785 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9786 {
9787 int i;
9788
9789 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9790 struct hlist_head *head;
9791 struct hlist_node *tmp;
9792 struct bnxt_ntuple_filter *fltr;
9793 int rc;
9794
9795 head = &bp->ntp_fltr_hash_tbl[i];
9796 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9797 bool del = false;
9798
9799 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9800 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9801 fltr->flow_id,
9802 fltr->sw_id)) {
9803 bnxt_hwrm_cfa_ntuple_filter_free(bp,
9804 fltr);
9805 del = true;
9806 }
9807 } else {
9808 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9809 fltr);
9810 if (rc)
9811 del = true;
9812 else
9813 set_bit(BNXT_FLTR_VALID, &fltr->state);
9814 }
9815
9816 if (del) {
9817 spin_lock_bh(&bp->ntp_fltr_lock);
9818 hlist_del_rcu(&fltr->hash);
9819 bp->ntp_fltr_count--;
9820 spin_unlock_bh(&bp->ntp_fltr_lock);
9821 synchronize_rcu();
9822 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9823 kfree(fltr);
9824 }
9825 }
9826 }
9827 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9828 netdev_info(bp->dev, "Received PF driver unload event!\n");
9829 }
9830
9831 #else
9832
9833 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9834 {
9835 }
9836
9837 #endif /* CONFIG_RFS_ACCEL */
9838
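/* UDP tunnel port notifications.  Only one VXLAN and one GENEVE port
 * can be offloaded at a time; a reference count tracks repeated adds of
 * the same port, and the firmware configuration is deferred to sp_task.
 */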
9839 static void bnxt_udp_tunnel_add(struct net_device *dev,
9840 struct udp_tunnel_info *ti)
9841 {
9842 struct bnxt *bp = netdev_priv(dev);
9843
9844 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9845 return;
9846
9847 if (!netif_running(dev))
9848 return;
9849
9850 switch (ti->type) {
9851 case UDP_TUNNEL_TYPE_VXLAN:
9852 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9853 return;
9854
9855 bp->vxlan_port_cnt++;
9856 if (bp->vxlan_port_cnt == 1) {
9857 bp->vxlan_port = ti->port;
9858 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
9860 }
9861 break;
9862 case UDP_TUNNEL_TYPE_GENEVE:
9863 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9864 return;
9865
9866 bp->nge_port_cnt++;
9867 if (bp->nge_port_cnt == 1) {
9868 bp->nge_port = ti->port;
9869 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9870 }
9871 break;
9872 default:
9873 return;
9874 }
9875
9876 bnxt_queue_sp_work(bp);
9877 }
9878
9879 static void bnxt_udp_tunnel_del(struct net_device *dev,
9880 struct udp_tunnel_info *ti)
9881 {
9882 struct bnxt *bp = netdev_priv(dev);
9883
9884 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9885 return;
9886
9887 if (!netif_running(dev))
9888 return;
9889
9890 switch (ti->type) {
9891 case UDP_TUNNEL_TYPE_VXLAN:
9892 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9893 return;
9894 bp->vxlan_port_cnt--;
9895
9896 if (bp->vxlan_port_cnt != 0)
9897 return;
9898
9899 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9900 break;
9901 case UDP_TUNNEL_TYPE_GENEVE:
9902 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9903 return;
9904 bp->nge_port_cnt--;
9905
9906 if (bp->nge_port_cnt != 0)
9907 return;
9908
9909 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9910 break;
9911 default:
9912 return;
9913 }
9914
9915 bnxt_queue_sp_work(bp);
9916 }
9917
9918 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9919 struct net_device *dev, u32 filter_mask,
9920 int nlflags)
9921 {
9922 struct bnxt *bp = netdev_priv(dev);
9923
9924 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
9925 nlflags, filter_mask, NULL);
9926 }
9927
9928 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
9929 u16 flags, struct netlink_ext_ack *extack)
9930 {
9931 struct bnxt *bp = netdev_priv(dev);
9932 struct nlattr *attr, *br_spec;
9933 int rem, rc = 0;
9934
9935 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
9936 return -EOPNOTSUPP;
9937
9938 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9939 if (!br_spec)
9940 return -EINVAL;
9941
9942 nla_for_each_nested(attr, br_spec, rem) {
9943 u16 mode;
9944
9945 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9946 continue;
9947
9948 if (nla_len(attr) < sizeof(mode))
9949 return -EINVAL;
9950
9951 mode = nla_get_u16(attr);
9952 if (mode == bp->br_mode)
9953 break;
9954
9955 rc = bnxt_hwrm_set_br_mode(bp, mode);
9956 if (!rc)
9957 bp->br_mode = mode;
9958 break;
9959 }
9960 return rc;
9961 }
9962
9963 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
9964 size_t len)
9965 {
9966 struct bnxt *bp = netdev_priv(dev);
9967 int rc;
9968
9969 /* The PF and its VF-reps only support the switchdev framework */
9970 if (!BNXT_PF(bp))
9971 return -EOPNOTSUPP;
9972
9973 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
9974
9975 if (rc >= len)
9976 return -EOPNOTSUPP;
9977 return 0;
9978 }
9979
9980 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
9981 {
9982 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
9983 return -EOPNOTSUPP;
9984
9985 /* The PF and its VF-reps only support the switchdev framework */
9986 if (!BNXT_PF(bp))
9987 return -EOPNOTSUPP;
9988
9989 switch (attr->id) {
9990 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
9991 attr->u.ppid.id_len = sizeof(bp->switch_id);
9992 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
9993 break;
9994 default:
9995 return -EOPNOTSUPP;
9996 }
9997 return 0;
9998 }
9999
10000 static int bnxt_swdev_port_attr_get(struct net_device *dev,
10001 struct switchdev_attr *attr)
10002 {
10003 return bnxt_port_attr_get(netdev_priv(dev), attr);
10004 }
10005
10006 static const struct switchdev_ops bnxt_switchdev_ops = {
10007 .switchdev_port_attr_get = bnxt_swdev_port_attr_get
10008 };
10009
10010 static const struct net_device_ops bnxt_netdev_ops = {
10011 .ndo_open = bnxt_open,
10012 .ndo_start_xmit = bnxt_start_xmit,
10013 .ndo_stop = bnxt_close,
10014 .ndo_get_stats64 = bnxt_get_stats64,
10015 .ndo_set_rx_mode = bnxt_set_rx_mode,
10016 .ndo_do_ioctl = bnxt_ioctl,
10017 .ndo_validate_addr = eth_validate_addr,
10018 .ndo_set_mac_address = bnxt_change_mac_addr,
10019 .ndo_change_mtu = bnxt_change_mtu,
10020 .ndo_fix_features = bnxt_fix_features,
10021 .ndo_set_features = bnxt_set_features,
10022 .ndo_tx_timeout = bnxt_tx_timeout,
10023 #ifdef CONFIG_BNXT_SRIOV
10024 .ndo_get_vf_config = bnxt_get_vf_config,
10025 .ndo_set_vf_mac = bnxt_set_vf_mac,
10026 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
10027 .ndo_set_vf_rate = bnxt_set_vf_bw,
10028 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
10029 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
10030 .ndo_set_vf_trust = bnxt_set_vf_trust,
10031 #endif
10032 .ndo_setup_tc = bnxt_setup_tc,
10033 #ifdef CONFIG_RFS_ACCEL
10034 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
10035 #endif
10036 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
10037 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
10038 .ndo_bpf = bnxt_xdp,
10039 .ndo_bridge_getlink = bnxt_bridge_getlink,
10040 .ndo_bridge_setlink = bnxt_bridge_setlink,
10041 .ndo_get_phys_port_name = bnxt_get_phys_port_name
10042 };
10043
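/* Device removal: disable SR-IOV and devlink for the PF, unregister the
 * netdev, cancel deferred work, and release HWRM, DCB, context memory
 * and PCI resources.
 */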
10044 static void bnxt_remove_one(struct pci_dev *pdev)
10045 {
10046 struct net_device *dev = pci_get_drvdata(pdev);
10047 struct bnxt *bp = netdev_priv(dev);
10048
10049 if (BNXT_PF(bp)) {
10050 bnxt_sriov_disable(bp);
10051 bnxt_dl_unregister(bp);
10052 }
10053
10054 pci_disable_pcie_error_reporting(pdev);
10055 unregister_netdev(dev);
10056 bnxt_shutdown_tc(bp);
10057 bnxt_cancel_sp_work(bp);
10058 bp->sp_event = 0;
10059
10060 bnxt_clear_int_mode(bp);
10061 bnxt_hwrm_func_drv_unrgtr(bp);
10062 bnxt_free_hwrm_resources(bp);
10063 bnxt_free_hwrm_short_cmd_req(bp);
10064 bnxt_ethtool_free(bp);
10065 bnxt_dcb_free(bp);
10066 kfree(bp->edev);
10067 bp->edev = NULL;
10068 bnxt_free_ctx_mem(bp);
10069 kfree(bp->ctx);
10070 bp->ctx = NULL;
10071 bnxt_cleanup_pci(bp);
10072 bnxt_free_port_stats(bp);
10073 free_netdev(dev);
10074 }
10075
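/* Query PHY capabilities and the current link state, then seed the
 * ethtool link settings (autoneg, advertised speeds, flow control) from
 * the NVM-configured defaults.
 */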
10076 static int bnxt_probe_phy(struct bnxt *bp)
10077 {
10078 int rc = 0;
10079 struct bnxt_link_info *link_info = &bp->link_info;
10080
10081 rc = bnxt_hwrm_phy_qcaps(bp);
10082 if (rc) {
10083 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10084 rc);
10085 return rc;
10086 }
10087 mutex_init(&bp->link_lock);
10088
10089 rc = bnxt_update_link(bp, false);
10090 if (rc) {
10091 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10092 rc);
10093 return rc;
10094 }
10095
10096 /* Older firmware does not have supported_auto_speeds, so assume
10097 * that all supported speeds can be autonegotiated.
10098 */
10099 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10100 link_info->support_auto_speeds = link_info->support_speeds;
10101
10102 /* Initialize the ethtool settings copy with NVM settings */
10103 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10104 link_info->autoneg = BNXT_AUTONEG_SPEED;
10105 if (bp->hwrm_spec_code >= 0x10201) {
10106 if (link_info->auto_pause_setting &
10107 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10108 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10109 } else {
10110 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10111 }
10112 link_info->advertising = link_info->auto_link_speeds;
10113 } else {
10114 link_info->req_link_speed = link_info->force_link_speed;
10115 link_info->req_duplex = link_info->duplex_setting;
10116 }
10117 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10118 link_info->req_flow_ctrl =
10119 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10120 else
10121 link_info->req_flow_ctrl = link_info->force_pause_setting;
10122 return rc;
10123 }
10124
10125 static int bnxt_get_max_irq(struct pci_dev *pdev)
10126 {
10127 u16 ctrl;
10128
10129 if (!pdev->msix_cap)
10130 return 1;
10131
10132 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10133 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10134 }
10135
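/* Derive the maximum usable RX/TX/completion rings from the resources
 * reported by firmware, accounting for MSI-X vectors and stat contexts
 * reserved for the ULP, halving RX when aggregation rings are enabled,
 * and capping RX by the number of hardware ring groups.
 */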
10136 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10137 int *max_cp)
10138 {
10139 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10140 int max_ring_grps = 0, max_irq;
10141
10142 *max_tx = hw_resc->max_tx_rings;
10143 *max_rx = hw_resc->max_rx_rings;
10144 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10145 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10146 bnxt_get_ulp_msix_num(bp),
10147 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
10148 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10149 *max_cp = min_t(int, *max_cp, max_irq);
10150 max_ring_grps = hw_resc->max_hw_ring_grps;
10151 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10152 *max_cp -= 1;
10153 *max_rx -= 2;
10154 }
10155 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10156 *max_rx >>= 1;
10157 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10158 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10159 /* On P5 chips, the max_cp output parameter is the number of available NQs */
10160 *max_cp = max_irq;
10161 }
10162 *max_rx = min_t(int, *max_rx, max_ring_grps);
10163 }
10164
10165 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10166 {
10167 int rx, tx, cp;
10168
10169 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
10170 *max_rx = rx;
10171 *max_tx = tx;
10172 if (!rx || !tx || !cp)
10173 return -ENOMEM;
10174
10175 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10176 }
10177
10178 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10179 bool shared)
10180 {
10181 int rc;
10182
10183 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10184 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10185 /* Not enough rings, try disabling agg rings. */
10186 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10187 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10188 if (rc) {
10189 /* set BNXT_FLAG_AGG_RINGS back for consistency */
10190 bp->flags |= BNXT_FLAG_AGG_RINGS;
10191 return rc;
10192 }
10193 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
10194 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10195 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10196 bnxt_set_ring_params(bp);
10197 }
10198
10199 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10200 int max_cp, max_stat, max_irq;
10201
10202 /* Reserve minimum resources for RoCE */
10203 max_cp = bnxt_get_max_func_cp_rings(bp);
10204 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10205 max_irq = bnxt_get_max_func_irqs(bp);
10206 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10207 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10208 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10209 return 0;
10210
10211 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10212 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10213 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10214 max_cp = min_t(int, max_cp, max_irq);
10215 max_cp = min_t(int, max_cp, max_stat);
10216 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10217 if (rc)
10218 rc = 0;
10219 }
10220 return rc;
10221 }
10222
10223 /* In initial default shared ring setting, each shared ring must have an
10224 * RX/TX ring pair.
10225 */
10226 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10227 {
10228 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10229 bp->rx_nr_rings = bp->cp_nr_rings;
10230 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10231 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10232 }
10233
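/* Choose default ring counts at probe time: start from
 * netif_get_num_default_rss_queues(), scale down on multi-port cards so
 * the total does not exceed the CPU count, then reserve the rings with
 * firmware and trim again if the reservation came back smaller.
 */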
10234 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
10235 {
10236 int dflt_rings, max_rx_rings, max_tx_rings, rc;
10237
10238 if (!bnxt_can_reserve_rings(bp))
10239 return 0;
10240
10241 if (sh)
10242 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10243 dflt_rings = netif_get_num_default_rss_queues();
10244 /* Reduce default rings on multi-port cards so that total default
10245 * rings do not exceed CPU count.
10246 */
10247 if (bp->port_count > 1) {
10248 int max_rings =
10249 max_t(int, num_online_cpus() / bp->port_count, 1);
10250
10251 dflt_rings = min_t(int, dflt_rings, max_rings);
10252 }
10253 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
10254 if (rc)
10255 return rc;
10256 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10257 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
10258 if (sh)
10259 bnxt_trim_dflt_sh_rings(bp);
10260 else
10261 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10262 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10263
10264 rc = __bnxt_reserve_rings(bp);
10265 if (rc)
10266 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
10267 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10268 if (sh)
10269 bnxt_trim_dflt_sh_rings(bp);
10270
10271 /* Rings may have been trimmed, re-reserve the trimmed rings. */
10272 if (bnxt_need_reserve_rings(bp)) {
10273 rc = __bnxt_reserve_rings(bp);
10274 if (rc)
10275 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10276 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10277 }
10278 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10279 bp->rx_nr_rings++;
10280 bp->cp_nr_rings++;
10281 }
10282 return rc;
10283 }
10284
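/* Late default ring setup for the case where no TX rings have been
 * configured yet: reserve default shared rings, re-initialize the
 * interrupt mode, and enable RFS/NTUPLE if the resulting configuration
 * supports it.
 */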
10285 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10286 {
10287 int rc;
10288
10289 if (bp->tx_nr_rings)
10290 return 0;
10291
10292 bnxt_ulp_irq_stop(bp);
10293 bnxt_clear_int_mode(bp);
10294 rc = bnxt_set_dflt_rings(bp, true);
10295 if (rc) {
10296 netdev_err(bp->dev, "Not enough rings available.\n");
10297 goto init_dflt_ring_err;
10298 }
10299 rc = bnxt_init_int_mode(bp);
10300 if (rc)
10301 goto init_dflt_ring_err;
10302
10303 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10304 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10305 bp->flags |= BNXT_FLAG_RFS;
10306 bp->dev->features |= NETIF_F_NTUPLE;
10307 }
10308 init_dflt_ring_err:
10309 bnxt_ulp_irq_restart(bp, rc);
10310 return rc;
10311 }
10312
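/* Re-query function resources from firmware and re-initialize the
 * interrupt mode, closing and reopening the netdev around the change if
 * it is running.  Must be called under rtnl_lock.
 */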
10313 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
10314 {
10315 int rc;
10316
10317 ASSERT_RTNL();
10318 bnxt_hwrm_func_qcaps(bp);
10319
10320 if (netif_running(bp->dev))
10321 __bnxt_close_nic(bp, true, false);
10322
10323 bnxt_ulp_irq_stop(bp);
10324 bnxt_clear_int_mode(bp);
10325 rc = bnxt_init_int_mode(bp);
10326 bnxt_ulp_irq_restart(bp, rc);
10327
10328 if (netif_running(bp->dev)) {
10329 if (rc)
10330 dev_close(bp->dev);
10331 else
10332 rc = bnxt_open_nic(bp, true, false);
10333 }
10334
10335 return rc;
10336 }
10337
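/* Set the initial MAC address: the PF uses the address reported by
 * firmware; a VF uses the admin-assigned MAC if one is valid, otherwise
 * a random address, and then asks the PF/firmware to approve it.
 */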
10338 static int bnxt_init_mac_addr(struct bnxt *bp)
10339 {
10340 int rc = 0;
10341
10342 if (BNXT_PF(bp)) {
10343 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10344 } else {
10345 #ifdef CONFIG_BNXT_SRIOV
10346 struct bnxt_vf_info *vf = &bp->vf;
10347 bool strict_approval = true;
10348
10349 if (is_valid_ether_addr(vf->mac_addr)) {
10350 /* overwrite netdev dev_addr with admin VF MAC */
10351 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
10352 /* Older PF driver or firmware may not approve this
10353 * correctly.
10354 */
10355 strict_approval = false;
10356 } else {
10357 eth_hw_addr_random(bp->dev);
10358 }
10359 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
10360 #endif
10361 }
10362 return rc;
10363 }
10364
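/* PCI probe: map the device, bring up the HWRM command channel, query
 * firmware version and capabilities, set up netdev features, default
 * rings and interrupt mode, then register the net device (and the
 * devlink instance for the PF).
 */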
10365 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10366 {
10367 static int version_printed;
10368 struct net_device *dev;
10369 struct bnxt *bp;
10370 int rc, max_irqs;
10371
10372 if (pci_is_bridge(pdev))
10373 return -ENODEV;
10374
10375 if (version_printed++ == 0)
10376 pr_info("%s", version);
10377
10378 max_irqs = bnxt_get_max_irq(pdev);
10379 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10380 if (!dev)
10381 return -ENOMEM;
10382
10383 bp = netdev_priv(dev);
10384 bnxt_set_max_func_irqs(bp, max_irqs);
10385
10386 if (bnxt_vf_pciid(ent->driver_data))
10387 bp->flags |= BNXT_FLAG_VF;
10388
10389 if (pdev->msix_cap)
10390 bp->flags |= BNXT_FLAG_MSIX_CAP;
10391
10392 rc = bnxt_init_board(pdev, dev);
10393 if (rc < 0)
10394 goto init_err_free;
10395
10396 dev->netdev_ops = &bnxt_netdev_ops;
10397 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10398 dev->ethtool_ops = &bnxt_ethtool_ops;
10399 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
10400 pci_set_drvdata(pdev, dev);
10401
10402 rc = bnxt_alloc_hwrm_resources(bp);
10403 if (rc)
10404 goto init_err_pci_clean;
10405
10406 mutex_init(&bp->hwrm_cmd_lock);
10407 rc = bnxt_hwrm_ver_get(bp);
10408 if (rc)
10409 goto init_err_pci_clean;
10410
10411 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10412 rc = bnxt_alloc_kong_hwrm_resources(bp);
10413 if (rc)
10414 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10415 }
10416
10417 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10418 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10419 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10420 if (rc)
10421 goto init_err_pci_clean;
10422 }
10423
10424 if (BNXT_CHIP_P5(bp))
10425 bp->flags |= BNXT_FLAG_CHIP_P5;
10426
10427 rc = bnxt_hwrm_func_reset(bp);
10428 if (rc)
10429 goto init_err_pci_clean;
10430
10431 bnxt_hwrm_fw_set_time(bp);
10432
10433 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10434 NETIF_F_TSO | NETIF_F_TSO6 |
10435 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10436 NETIF_F_GSO_IPXIP4 |
10437 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10438 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
10439 NETIF_F_RXCSUM | NETIF_F_GRO;
10440
10441 if (BNXT_SUPPORTS_TPA(bp))
10442 dev->hw_features |= NETIF_F_LRO;
10443
10444 dev->hw_enc_features =
10445 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10446 NETIF_F_TSO | NETIF_F_TSO6 |
10447 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10448 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10449 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
10450 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10451 NETIF_F_GSO_GRE_CSUM;
10452 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10453 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10454 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
10455 if (BNXT_SUPPORTS_TPA(bp))
10456 dev->hw_features |= NETIF_F_GRO_HW;
10457 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
10458 if (dev->features & NETIF_F_GRO_HW)
10459 dev->features &= ~NETIF_F_LRO;
10460 dev->priv_flags |= IFF_UNICAST_FLT;
10461
10462 #ifdef CONFIG_BNXT_SRIOV
10463 init_waitqueue_head(&bp->sriov_cfg_wait);
10464 mutex_init(&bp->sriov_lock);
10465 #endif
10466 if (BNXT_SUPPORTS_TPA(bp)) {
10467 bp->gro_func = bnxt_gro_func_5730x;
10468 if (BNXT_CHIP_P4(bp))
10469 bp->gro_func = bnxt_gro_func_5731x;
10470 }
10471 if (!BNXT_CHIP_P4_PLUS(bp))
10472 bp->flags |= BNXT_FLAG_DOUBLE_DB;
10473
10474 rc = bnxt_hwrm_func_drv_rgtr(bp);
10475 if (rc)
10476 goto init_err_pci_clean;
10477
10478 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10479 if (rc)
10480 goto init_err_pci_clean;
10481
10482 bp->ulp_probe = bnxt_ulp_probe;
10483
10484 rc = bnxt_hwrm_queue_qportcfg(bp);
10485 if (rc) {
10486 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10487 rc);
10488 rc = -1;
10489 goto init_err_pci_clean;
10490 }
10491 /* Get the MAX capabilities for this function */
10492 rc = bnxt_hwrm_func_qcaps(bp);
10493 if (rc) {
10494 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10495 rc);
10496 rc = -1;
10497 goto init_err_pci_clean;
10498 }
10499 rc = bnxt_init_mac_addr(bp);
10500 if (rc) {
10501 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10502 rc = -EADDRNOTAVAIL;
10503 goto init_err_pci_clean;
10504 }
10505
10506 bnxt_hwrm_func_qcfg(bp);
10507 bnxt_hwrm_vnic_qcaps(bp);
10508 bnxt_hwrm_port_led_qcaps(bp);
10509 bnxt_ethtool_init(bp);
10510 bnxt_dcb_init(bp);
10511
10512 /* MTU range: 60 - FW defined max */
10513 dev->min_mtu = ETH_ZLEN;
10514 dev->max_mtu = bp->max_mtu;
10515
10516 rc = bnxt_probe_phy(bp);
10517 if (rc)
10518 goto init_err_pci_clean;
10519
10520 bnxt_set_rx_skb_mode(bp, false);
10521 bnxt_set_tpa_flags(bp);
10522 bnxt_set_ring_params(bp);
10523 rc = bnxt_set_dflt_rings(bp, true);
10524 if (rc) {
10525 netdev_err(bp->dev, "Not enough rings available.\n");
10526 rc = -ENOMEM;
10527 goto init_err_pci_clean;
10528 }
10529
10530 /* Default RSS hash cfg. */
10531 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10532 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10533 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10534 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10535 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10536 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10537 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10538 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10539 }
10540
10541 if (bnxt_rfs_supported(bp)) {
10542 dev->hw_features |= NETIF_F_NTUPLE;
10543 if (bnxt_rfs_capable(bp)) {
10544 bp->flags |= BNXT_FLAG_RFS;
10545 dev->features |= NETIF_F_NTUPLE;
10546 }
10547 }
10548
10549 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10550 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10551
10552 rc = bnxt_init_int_mode(bp);
10553 if (rc)
10554 goto init_err_pci_clean;
10555
10556 /* No TC has been set yet and rings may have been trimmed due to
10557 * limited MSIX, so we re-initialize the TX rings per TC.
10558 */
10559 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10560
10561 bnxt_get_wol_settings(bp);
10562 if (bp->flags & BNXT_FLAG_WOL_CAP)
10563 device_set_wakeup_enable(&pdev->dev, bp->wol);
10564 else
10565 device_set_wakeup_capable(&pdev->dev, false);
10566
10567 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10568
10569 bnxt_hwrm_coal_params_qcaps(bp);
10570
10571 if (BNXT_PF(bp)) {
10572 if (!bnxt_pf_wq) {
10573 bnxt_pf_wq =
10574 create_singlethread_workqueue("bnxt_pf_wq");
10575 if (!bnxt_pf_wq) {
10576 dev_err(&pdev->dev, "Unable to create workqueue.\n");
10577 goto init_err_pci_clean;
10578 }
10579 }
10580 bnxt_init_tc(bp);
10581 }
10582
10583 rc = register_netdev(dev);
10584 if (rc)
10585 goto init_err_cleanup_tc;
10586
10587 if (BNXT_PF(bp))
10588 bnxt_dl_register(bp);
10589
10590 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10591 board_info[ent->driver_data].name,
10592 (long)pci_resource_start(pdev, 0), dev->dev_addr);
10593 pcie_print_link_status(pdev);
10594
10595 return 0;
10596
10597 init_err_cleanup_tc:
10598 bnxt_shutdown_tc(bp);
10599 bnxt_clear_int_mode(bp);
10600
10601 init_err_pci_clean:
10602 bnxt_free_hwrm_resources(bp);
10603 bnxt_free_ctx_mem(bp);
10604 kfree(bp->ctx);
10605 bp->ctx = NULL;
10606 bnxt_cleanup_pci(bp);
10607
10608 init_err_free:
10609 free_netdev(dev);
10610 return rc;
10611 }
10612
10613 static void bnxt_shutdown(struct pci_dev *pdev)
10614 {
10615 struct net_device *dev = pci_get_drvdata(pdev);
10616 struct bnxt *bp;
10617
10618 if (!dev)
10619 return;
10620
10621 rtnl_lock();
10622 bp = netdev_priv(dev);
10623 if (!bp)
10624 goto shutdown_exit;
10625
10626 if (netif_running(dev))
10627 dev_close(dev);
10628
10629 bnxt_ulp_shutdown(bp);
10630
10631 if (system_state == SYSTEM_POWER_OFF) {
10632 bnxt_clear_int_mode(bp);
10633 pci_wake_from_d3(pdev, bp->wol);
10634 pci_set_power_state(pdev, PCI_D3hot);
10635 }
10636
10637 shutdown_exit:
10638 rtnl_unlock();
10639 }
10640
10641 #ifdef CONFIG_PM_SLEEP
10642 static int bnxt_suspend(struct device *device)
10643 {
10644 struct pci_dev *pdev = to_pci_dev(device);
10645 struct net_device *dev = pci_get_drvdata(pdev);
10646 struct bnxt *bp = netdev_priv(dev);
10647 int rc = 0;
10648
10649 rtnl_lock();
10650 if (netif_running(dev)) {
10651 netif_device_detach(dev);
10652 rc = bnxt_close(dev);
10653 }
10654 bnxt_hwrm_func_drv_unrgtr(bp);
10655 rtnl_unlock();
10656 return rc;
10657 }
10658
10659 static int bnxt_resume(struct device *device)
10660 {
10661 struct pci_dev *pdev = to_pci_dev(device);
10662 struct net_device *dev = pci_get_drvdata(pdev);
10663 struct bnxt *bp = netdev_priv(dev);
10664 int rc = 0;
10665
10666 rtnl_lock();
10667 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10668 rc = -ENODEV;
10669 goto resume_exit;
10670 }
10671 rc = bnxt_hwrm_func_reset(bp);
10672 if (rc) {
10673 rc = -EBUSY;
10674 goto resume_exit;
10675 }
10676 bnxt_get_wol_settings(bp);
10677 if (netif_running(dev)) {
10678 rc = bnxt_open(dev);
10679 if (!rc)
10680 netif_device_attach(dev);
10681 }
10682
10683 resume_exit:
10684 rtnl_unlock();
10685 return rc;
10686 }
10687
10688 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10689 #define BNXT_PM_OPS (&bnxt_pm_ops)
10690
10691 #else
10692
10693 #define BNXT_PM_OPS NULL
10694
10695 #endif /* CONFIG_PM_SLEEP */
10696
10697 /**
10698 * bnxt_io_error_detected - called when PCI error is detected
10699 * @pdev: Pointer to PCI device
10700 * @state: The current pci connection state
10701 *
10702 * This function is called after a PCI bus error affecting
10703 * this device has been detected.
10704 */
10705 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10706 pci_channel_state_t state)
10707 {
10708 struct net_device *netdev = pci_get_drvdata(pdev);
10709 struct bnxt *bp = netdev_priv(netdev);
10710
10711 netdev_info(netdev, "PCI I/O error detected\n");
10712
10713 rtnl_lock();
10714 netif_device_detach(netdev);
10715
10716 bnxt_ulp_stop(bp);
10717
10718 if (state == pci_channel_io_perm_failure) {
10719 rtnl_unlock();
10720 return PCI_ERS_RESULT_DISCONNECT;
10721 }
10722
10723 if (netif_running(netdev))
10724 bnxt_close(netdev);
10725
10726 pci_disable_device(pdev);
10727 rtnl_unlock();
10728
10729 /* Request a slot reset. */
10730 return PCI_ERS_RESULT_NEED_RESET;
10731 }
10732
10733 /**
10734 * bnxt_io_slot_reset - called after the pci bus has been reset.
10735 * @pdev: Pointer to PCI device
10736 *
10737 * Restart the card from scratch, as if from a cold-boot.
10738 * At this point, the card has experienced a hard reset,
10739 * followed by fixups by BIOS, and has its config space
10740 * set up identically to what it was at cold boot.
10741 */
10742 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10743 {
10744 struct net_device *netdev = pci_get_drvdata(pdev);
10745 struct bnxt *bp = netdev_priv(netdev);
10746 int err = 0;
10747 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10748
10749 netdev_info(bp->dev, "PCI Slot Reset\n");
10750
10751 rtnl_lock();
10752
10753 if (pci_enable_device(pdev)) {
10754 dev_err(&pdev->dev,
10755 "Cannot re-enable PCI device after reset.\n");
10756 } else {
10757 pci_set_master(pdev);
10758
10759 err = bnxt_hwrm_func_reset(bp);
10760 if (!err && netif_running(netdev))
10761 err = bnxt_open(netdev);
10762
10763 if (!err) {
10764 result = PCI_ERS_RESULT_RECOVERED;
10765 bnxt_ulp_start(bp);
10766 }
10767 }
10768
10769 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10770 dev_close(netdev);
10771
10772 rtnl_unlock();
10773
10774 return result;
10775 }
10776
10777 /**
10778 * bnxt_io_resume - called when traffic can start flowing again.
10779 * @pdev: Pointer to PCI device
10780 *
10781 * This callback is called when the error recovery driver tells
10782 * us that it's OK to resume normal operation.
10783 */
10784 static void bnxt_io_resume(struct pci_dev *pdev)
10785 {
10786 struct net_device *netdev = pci_get_drvdata(pdev);
10787
10788 rtnl_lock();
10789
10790 netif_device_attach(netdev);
10791
10792 rtnl_unlock();
10793 }
10794
10795 static const struct pci_error_handlers bnxt_err_handler = {
10796 .error_detected = bnxt_io_error_detected,
10797 .slot_reset = bnxt_io_slot_reset,
10798 .resume = bnxt_io_resume
10799 };
10800
10801 static struct pci_driver bnxt_pci_driver = {
10802 .name = DRV_MODULE_NAME,
10803 .id_table = bnxt_pci_tbl,
10804 .probe = bnxt_init_one,
10805 .remove = bnxt_remove_one,
10806 .shutdown = bnxt_shutdown,
10807 .driver.pm = BNXT_PM_OPS,
10808 .err_handler = &bnxt_err_handler,
10809 #if defined(CONFIG_BNXT_SRIOV)
10810 .sriov_configure = bnxt_sriov_configure,
10811 #endif
10812 };
10813
10814 static int __init bnxt_init(void)
10815 {
10816 bnxt_debug_init();
10817 return pci_register_driver(&bnxt_pci_driver);
10818 }
10819
10820 static void __exit bnxt_exit(void)
10821 {
10822 pci_unregister_driver(&bnxt_pci_driver);
10823 if (bnxt_pf_wq)
10824 destroy_workqueue(bnxt_pf_wq);
10825 bnxt_debug_exit();
10826 }
10827
10828 module_init(bnxt_init);
10829 module_exit(bnxt_exit);