]>
Commit | Line | Data |
---|---|---|
4863dea3 SG |
1 | /* |
2 | * Copyright (C) 2015 Cavium, Inc. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/etherdevice.h> | |
13 | #include <linux/of.h> | |
14 | ||
15 | #include "nic_reg.h" | |
16 | #include "nic.h" | |
17 | #include "q_struct.h" | |
18 | #include "thunder_bgx.h" | |
19 | ||
20 | #define DRV_NAME "thunder-nic" | |
21 | #define DRV_VERSION "1.0" | |
22 | ||
a5c3d498 SG |
/* Per-silicon hardware capability limits (88xx/81xx/83xx), filled in by
 * nic_get_hw_info() from the PCI subsystem device ID.
 */
struct hw_info {
	u8 bgx_cnt;		/* Number of BGX blocks on this chip */
	u8 chans_per_lmac;	/* Rx/Tx channels per LMAC */
	u8 chans_per_bgx;	/* Rx/Tx chans */
	u8 chans_per_rgx;	/* 81xx only: channels behind the RGX block */
	u8 chans_per_lbk;	/* Channels for the loopback interface */
	u16 cpi_cnt;		/* Channel parse indices */
	u16 rssi_cnt;		/* RSS indirection entries */
	u16 rss_ind_tbl_size;	/* RSS indirection table size per interface */
	u16 tl4_cnt;		/* Transmit-scheduler level 4 queues */
	u16 tl3_cnt;
	u8 tl2_cnt;
	u8 tl1_cnt;
	bool tl1_per_bgx;	/* TL1 per BGX or per LMAC */
};
38 | ||
4863dea3 SG |
/* Driver state for the NIC physical function. */
struct nicpf {
	struct pci_dev *pdev;
	struct hw_info *hw;	/* Per-chip capability limits */
	u8 node;		/* NUMA node this PF sits on */
	unsigned int flags;
	u8 num_vf_en;		/* No of VF enabled */
	bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem *reg_base;	/* Register start address */
	u8 num_sqs_en;		/* Secondary qsets enabled */
	u64 nicvf[MAX_NUM_VFS_SUPPORTED];	/* nicvf pointers, opaque to PF */
	u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];	/* VF -> its secondary Qsets */
	u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];	/* Secondary Qset -> primary VF */
	bool sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg pkind;
/* A VF::LMAC mapping entry packs the BGX index in the upper nibble and
 * the LMAC index in the lower nibble.
 */
#define NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8 *vf_lmac_map;	/* Per-VF packed BGX/LMAC map, kmalloc'ed */
	struct delayed_work dwork;
	struct workqueue_struct *check_link;	/* Link-state polling workqueue */
	u8 *link;		/* Last reported link state, per LMAC */
	u8 *duplex;
	u32 *speed;
	u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
	bool mbx_lock[MAX_NUM_VFS_SUPPORTED];	/* Per-VF mailbox-in-progress flag */

	/* MSI-X */
	bool msix_enabled;
	u8 num_vec;
	struct msix_entry *msix_entries;
	bool irq_allocated[NIC_PF_MSIX_VECTORS];
	char irq_name[NIC_PF_MSIX_VECTORS][20];
};
73 | ||
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
/* Lets module loaders auto-bind this driver to the PF device */
MODULE_DEVICE_TABLE(pci, nic_id_table);
85 | ||
86 | /* The Cavium ThunderX network controller can *only* be found in SoCs | |
87 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | |
88 | * registers on this platform are implicitly strongly ordered with respect | |
89 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | |
90 | * with no memory barriers in this driver. The readq()/writeq() functions add | |
91 | * explicit ordering operation which in this case are redundant, and only | |
92 | * add overhead. | |
93 | */ | |
94 | ||
95 | /* Register read/write APIs */ | |
/* Write a 64-bit PF CSR; relaxed MMIO is safe here, see comment above. */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}
100 | ||
/* Read a 64-bit PF CSR; relaxed MMIO is safe here, see comment above. */
static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}
105 | ||
106 | /* PF -> VF mailbox communication APIs */ | |
/* Clear any pending PF<-VF mailbox interrupts and enable them for every
 * VF the device can expose.  The interrupt registers hold one bit per VF,
 * 64 VFs per 64-bit register.
 */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);

/* Bitmask covering the low @vfs VFs (all-ones when vfs >= 64) */
#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))

	/* Clear it, to avoid spurious interrupts (if any) */
	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));

	/* Enable mailbox interrupt for all VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
	/* One mailbox intr enable reg per 64 VFs */
	if (vf_cnt > 64) {
		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
	}
}
126 | ||
/* Acknowledge the mailbox interrupt of @vf in interrupt register @mbx_reg
 * (registers are 8 bytes apart, one per 64 VFs; write-1-to-clear).
 */
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}
131 | ||
132 | static u64 nic_get_mbx_addr(int vf) | |
133 | { | |
134 | return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT); | |
135 | } | |
136 | ||
137 | /* Send a mailbox message to VF | |
138 | * @vf: vf to which this message to be sent | |
139 | * @mbx: Message to be sent | |
140 | */ | |
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;	/* message is two 64-bit words */

	/* In first revision HW, mbox interrupt is triggerred
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		/* Write the interrupt-raising word last so the VF never
		 * observes a half-written message.
		 */
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}
161 | ||
162 | /* Responds to VF's READY message with VF's | |
163 | * ID, node, MAC address e.t.c | |
164 | * @vf: VF which sent READY message | |
165 | */ | |
166 | static void nic_mbx_send_ready(struct nicpf *nic, int vf) | |
167 | { | |
168 | union nic_mbx mbx = {}; | |
169 | int bgx_idx, lmac; | |
170 | const char *mac; | |
171 | ||
172 | mbx.nic_cfg.msg = NIC_MBOX_MSG_READY; | |
173 | mbx.nic_cfg.vf_id = vf; | |
174 | ||
175 | mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; | |
176 | ||
949b5331 | 177 | if (vf < nic->num_vf_en) { |
92dc8769 SG |
178 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
179 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
4863dea3 | 180 | |
92dc8769 SG |
181 | mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); |
182 | if (mac) | |
183 | ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); | |
184 | } | |
185 | mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false; | |
4863dea3 | 186 | mbx.nic_cfg.node_id = nic->node; |
d77a2384 | 187 | |
949b5331 | 188 | mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en; |
d77a2384 | 189 | |
4863dea3 SG |
190 | nic_send_msg_to_vf(nic, vf, &mbx); |
191 | } | |
192 | ||
/* ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};	/* zero-fill so unused words are defined */

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
203 | ||
/* NACKs VF's mailbox message that PF is not able to
 * complete the action
 * @vf: VF to which ACK to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};	/* zero-fill so unused words are defined */

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
215 | ||
/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 *
 * Returns 0 on success, 1 if the sync did not complete within the
 * polling budget (this driver uses 1/0, not -errno, for such helpers).
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;	/* 0xFFFF busy-poll iterations */

	/* Kick off the software sync cycle */
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	/* Stop the sync cycle regardless of outcome */
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}
237 | ||
238 | /* Get BGX Rx/Tx stats and respond to VF's request */ | |
239 | static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) | |
240 | { | |
241 | int bgx_idx, lmac; | |
242 | union nic_mbx mbx = {}; | |
243 | ||
244 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | |
245 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | |
246 | ||
247 | mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; | |
248 | mbx.bgx_stats.vf_id = bgx->vf_id; | |
249 | mbx.bgx_stats.rx = bgx->rx; | |
250 | mbx.bgx_stats.idx = bgx->idx; | |
251 | if (bgx->rx) | |
252 | mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx, | |
253 | lmac, bgx->idx); | |
254 | else | |
255 | mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx, | |
256 | lmac, bgx->idx); | |
257 | nic_send_msg_to_vf(nic, bgx->vf_id, &mbx); | |
258 | } | |
259 | ||
/* Update hardware min/max frame size
 *
 * Returns 0 on success/no-op, 1 on an out-of-range request.
 */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	/* Account for the Ethernet header on top of the requested MTU.
	 * NOTE(review): VLAN tag bytes are not added here — confirm tagged
	 * frames at max MTU are expected to pass.
	 */
	new_frs += ETH_HLEN;
	/* maxlen is shared by all VFs: only ever grow it */
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	/* NOTE(review): only PKIND 0 is rewritten here, while init programs
	 * all NIC_MAX_PKIND entries — verify the other pkinds are unused.
	 */
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}
277 | ||
/* Set minimum transmit packet size
 *
 * Programs the min-packet field (bits 5:2, in 4-byte units) of every
 * LMAC's config register so short frames are padded by hardware.
 */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac, max_lmac;
	u16 sdevid;
	u64 lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* 81xx's RGX has only one LMAC */
	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
	else
		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		/* Replace the 4-bit min-packet-size field at bits 5:2 */
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
303 | ||
/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);	/* bitmap of active BGXs */
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		/* One primary VF per discovered LMAC */
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);

		/* On CN81XX there are only 8 VFs but max possible no of
		 * interfaces are 9.
		 */
		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
			break;
		}
	}
}
346 | ||
949b5331 | 347 | static void nic_free_lmacmem(struct nicpf *nic) |
a5c3d498 | 348 | { |
949b5331 SG |
349 | kfree(nic->vf_lmac_map); |
350 | kfree(nic->link); | |
351 | kfree(nic->duplex); | |
352 | kfree(nic->speed); | |
353 | } | |
354 | ||
355 | static int nic_get_hw_info(struct nicpf *nic) | |
356 | { | |
357 | u8 max_lmac; | |
a5c3d498 SG |
358 | u16 sdevid; |
359 | struct hw_info *hw = nic->hw; | |
360 | ||
361 | pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); | |
362 | ||
363 | switch (sdevid) { | |
364 | case PCI_SUBSYS_DEVID_88XX_NIC_PF: | |
365 | hw->bgx_cnt = MAX_BGX_PER_CN88XX; | |
366 | hw->chans_per_lmac = 16; | |
367 | hw->chans_per_bgx = 128; | |
368 | hw->cpi_cnt = 2048; | |
369 | hw->rssi_cnt = 4096; | |
370 | hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; | |
371 | hw->tl3_cnt = 256; | |
372 | hw->tl2_cnt = 64; | |
373 | hw->tl1_cnt = 2; | |
374 | hw->tl1_per_bgx = true; | |
375 | break; | |
0025d93e SG |
376 | case PCI_SUBSYS_DEVID_81XX_NIC_PF: |
377 | hw->bgx_cnt = MAX_BGX_PER_CN81XX; | |
378 | hw->chans_per_lmac = 8; | |
379 | hw->chans_per_bgx = 32; | |
380 | hw->chans_per_rgx = 8; | |
381 | hw->chans_per_lbk = 24; | |
382 | hw->cpi_cnt = 512; | |
383 | hw->rssi_cnt = 256; | |
384 | hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */ | |
385 | hw->tl3_cnt = 64; | |
386 | hw->tl2_cnt = 16; | |
387 | hw->tl1_cnt = 10; | |
388 | hw->tl1_per_bgx = false; | |
389 | break; | |
390 | case PCI_SUBSYS_DEVID_83XX_NIC_PF: | |
391 | hw->bgx_cnt = MAX_BGX_PER_CN83XX; | |
392 | hw->chans_per_lmac = 8; | |
393 | hw->chans_per_bgx = 32; | |
394 | hw->chans_per_lbk = 64; | |
395 | hw->cpi_cnt = 2048; | |
396 | hw->rssi_cnt = 1024; | |
397 | hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */ | |
398 | hw->tl3_cnt = 256; | |
399 | hw->tl2_cnt = 64; | |
400 | hw->tl1_cnt = 18; | |
401 | hw->tl1_per_bgx = false; | |
402 | break; | |
a5c3d498 SG |
403 | } |
404 | hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev); | |
949b5331 SG |
405 | |
406 | /* Allocate memory for LMAC tracking elements */ | |
407 | max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX; | |
408 | nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); | |
409 | if (!nic->vf_lmac_map) | |
410 | goto error; | |
411 | nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); | |
412 | if (!nic->link) | |
413 | goto error; | |
414 | nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); | |
415 | if (!nic->duplex) | |
416 | goto error; | |
417 | nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL); | |
418 | if (!nic->speed) | |
419 | goto error; | |
420 | return 0; | |
421 | ||
422 | error: | |
423 | nic_free_lmacmem(nic); | |
424 | return -ENOMEM; | |
a5c3d498 SG |
425 | } |
426 | ||
4863dea3 SG |
427 | #define BGX0_BLOCK 8 |
428 | #define BGX1_BLOCK 9 | |
429 | ||
949b5331 | 430 | static int nic_init_hw(struct nicpf *nic) |
4863dea3 | 431 | { |
949b5331 | 432 | int i, err; |
4c0b6eaf | 433 | u64 cqm_cfg; |
4863dea3 | 434 | |
a5c3d498 | 435 | /* Get HW capability info */ |
949b5331 SG |
436 | err = nic_get_hw_info(nic); |
437 | if (err) | |
438 | return err; | |
a5c3d498 | 439 | |
4863dea3 SG |
440 | /* Enable NIC HW block */ |
441 | nic_reg_write(nic, NIC_PF_CFG, 0x3); | |
442 | ||
443 | /* Enable backpressure */ | |
444 | nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); | |
445 | ||
0025d93e SG |
446 | /* TNS and TNS bypass modes are present only on 88xx */ |
447 | if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) { | |
448 | /* Disable TNS mode on both interfaces */ | |
449 | nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, | |
450 | (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); | |
451 | nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), | |
452 | (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); | |
453 | } | |
454 | ||
4863dea3 SG |
455 | nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, |
456 | (1ULL << 63) | BGX0_BLOCK); | |
457 | nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), | |
458 | (1ULL << 63) | BGX1_BLOCK); | |
459 | ||
460 | /* PKIND configuration */ | |
461 | nic->pkind.minlen = 0; | |
462 | nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; | |
463 | nic->pkind.lenerr_en = 1; | |
464 | nic->pkind.rx_hdr = 0; | |
465 | nic->pkind.hdr_sl = 0; | |
466 | ||
467 | for (i = 0; i < NIC_MAX_PKIND; i++) | |
468 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), | |
469 | *(u64 *)&nic->pkind); | |
470 | ||
471 | nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS); | |
472 | ||
473 | /* Timer config */ | |
474 | nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); | |
aa2e259b SG |
475 | |
476 | /* Enable VLAN ethertype matching and stripping */ | |
477 | nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, | |
478 | (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); | |
4c0b6eaf SG |
479 | |
480 | /* Check if HW expected value is higher (could be in future chips) */ | |
481 | cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); | |
482 | if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) | |
483 | nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); | |
949b5331 SG |
484 | |
485 | return 0; | |
4863dea3 SG |
486 | } |
487 | ||
/* Channel parse index configuration
 *
 * Maps the VF's Rx channel to a range of CPIs (sized by the chosen CPI
 * algorithm), maps those CPIs to the VF's RSS indirection entries, and
 * maps each RSSI to a (Qset, RQ) pair.  Base indices are recorded so
 * nic_config_rss() can find them later.
 */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8 qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	/* Absolute channel number of this VF's LMAC */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	/* Number of CPIs consumed depends on the parse algorithm */
	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		/* Stop advancing RSSI once all RQs have been covered */
		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	/* Remember bases for later RSS reconfiguration */
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
565 | ||
566 | /* Responsds to VF with its RSS indirection table size */ | |
567 | static void nic_send_rss_size(struct nicpf *nic, int vf) | |
568 | { | |
569 | union nic_mbx mbx = {}; | |
570 | u64 *msg; | |
571 | ||
572 | msg = (u64 *)&mbx; | |
573 | ||
574 | mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; | |
a5c3d498 | 575 | mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size; |
4863dea3 SG |
576 | nic_send_msg_to_vf(nic, vf, &mbx); |
577 | } | |
578 | ||
/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8 qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	/* Bases were recorded by nic_config_cpi() */
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		/* Upper bits of an indirection entry select a secondary
		 * Qset (1-based); 0 means the VF's own primary Qset.
		 */
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	/* Hash-bit count lives in CPI cfg on pass1, MPI cfg otherwise */
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	/* Replace the 4-bit hash-bits field at bits 23:20 */
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
618 | ||
/* 4 level transmit side scheduler configutation
 * for TNS bypass mode
 *
 * Sample configuration for SQ0 on 88xx
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	/* For a secondary Qset, scheduling follows its primary VF's LMAC */
	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* For 88xx 0-511 TL4 transmits via BGX0 and
	 * 512-1023 TL4s transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			/* Locate this secondary Qset's slot under its
			 * primary VF to pick a unique TL4 range.
			 */
			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx 0-127 channels are for BGX0 and
	 * 127-255 channels for BGX1.
	 *
	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
	 * within LMAC i.e 0-7 and not the actual channel number like on 88xx
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	/* Four TL3s aggregate into one TL2 */
	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
	 * on 81xx/83xx TL2 needs to be configured to transmit to one of the
	 * possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}
715 | ||
92dc8769 SG |
/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	/* pqs_vf[] maps a secondary Qset back to its primary VF */
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}
725 | ||
/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	/* Resolve requested secondary-Qset slot to its VF number */
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}
737 | ||
738 | /* Find next available Qset that can be assigned as a | |
739 | * secondary Qset to a VF. | |
740 | */ | |
741 | static int nic_nxt_avail_sqs(struct nicpf *nic) | |
742 | { | |
743 | int sqs; | |
744 | ||
745 | for (sqs = 0; sqs < nic->num_sqs_en; sqs++) { | |
746 | if (!nic->sqs_used[sqs]) | |
747 | nic->sqs_used[sqs] = true; | |
748 | else | |
749 | continue; | |
750 | return sqs + nic->num_vf_en; | |
751 | } | |
752 | return -1; | |
753 | } | |
754 | ||
755 | /* Allocate additional Qsets for requested VF */ | |
756 | static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs) | |
757 | { | |
758 | union nic_mbx mbx = {}; | |
759 | int idx, alloc_qs = 0; | |
760 | int sqs_id; | |
761 | ||
762 | if (!nic->num_sqs_en) | |
763 | goto send_mbox; | |
764 | ||
765 | for (idx = 0; idx < sqs->qs_count; idx++) { | |
766 | sqs_id = nic_nxt_avail_sqs(nic); | |
767 | if (sqs_id < 0) | |
768 | break; | |
769 | nic->vf_sqs[sqs->vf_id][idx] = sqs_id; | |
770 | nic->pqs_vf[sqs_id] = sqs->vf_id; | |
771 | alloc_qs++; | |
772 | } | |
773 | ||
774 | send_mbox: | |
775 | mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS; | |
776 | mbx.sqs_alloc.vf_id = sqs->vf_id; | |
777 | mbx.sqs_alloc.qs_count = alloc_qs; | |
778 | nic_send_msg_to_vf(nic, sqs->vf_id, &mbx); | |
779 | } | |
780 | ||
d77a2384 SG |
781 | static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) |
782 | { | |
783 | int bgx_idx, lmac_idx; | |
784 | ||
949b5331 | 785 | if (lbk->vf_id >= nic->num_vf_en) |
d77a2384 SG |
786 | return -1; |
787 | ||
788 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); | |
789 | lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); | |
790 | ||
791 | bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable); | |
792 | ||
793 | return 0; | |
794 | } | |
795 | ||
3458c40d JJ |
796 | /* Reset statistics counters */ |
797 | static int nic_reset_stat_counters(struct nicpf *nic, | |
798 | int vf, struct reset_stat_cfg *cfg) | |
799 | { | |
800 | int i, stat, qnum; | |
801 | u64 reg_addr; | |
802 | ||
803 | for (i = 0; i < RX_STATS_ENUM_LAST; i++) { | |
804 | if (cfg->rx_stat_mask & BIT(i)) { | |
805 | reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 | | |
806 | (vf << NIC_QS_ID_SHIFT) | | |
807 | (i << 3); | |
808 | nic_reg_write(nic, reg_addr, 0); | |
809 | } | |
810 | } | |
811 | ||
812 | for (i = 0; i < TX_STATS_ENUM_LAST; i++) { | |
813 | if (cfg->tx_stat_mask & BIT(i)) { | |
814 | reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 | | |
815 | (vf << NIC_QS_ID_SHIFT) | | |
816 | (i << 3); | |
817 | nic_reg_write(nic, reg_addr, 0); | |
818 | } | |
819 | } | |
820 | ||
821 | for (i = 0; i <= 15; i++) { | |
822 | qnum = i >> 1; | |
823 | stat = i & 1 ? 1 : 0; | |
824 | reg_addr = (vf << NIC_QS_ID_SHIFT) | | |
825 | (qnum << NIC_Q_NUM_SHIFT) | (stat << 3); | |
826 | if (cfg->rq_stat_mask & BIT(i)) { | |
827 | reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1; | |
828 | nic_reg_write(nic, reg_addr, 0); | |
829 | } | |
830 | if (cfg->sq_stat_mask & BIT(i)) { | |
831 | reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1; | |
832 | nic_reg_write(nic, reg_addr, 0); | |
833 | } | |
834 | } | |
835 | return 0; | |
836 | } | |
837 | ||
e22e86ea ZS |
/* Program Geneve/NVGRE/VXLAN tunnel parsing defaults (standard UDP ports
 * and inner-protocol ethertype/IPv4/IPv6 definitions).
 * Note: @vf is currently unused; configuration is device-global.
 */
static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
{
	/* Inner protocol numbers packed as IPv6 | IPv4 | ethertype */
	u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
	u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
			     (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;

	/* Configure tunnel parsing parameters */
	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
		      (1ULL << 63 | UDP_GENEVE_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
		      ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
		      ((0xfULL << 60) | vxlan_prot_def));
}
856 | ||
f406ce42 PF |
857 | static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) |
858 | { | |
859 | int bgx, lmac; | |
860 | ||
861 | nic->vf_enabled[vf] = enable; | |
862 | ||
863 | if (vf >= nic->num_vf_en) | |
864 | return; | |
865 | ||
866 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
867 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
868 | ||
869 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); | |
870 | } | |
871 | ||
4863dea3 SG |
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;	/* 0 -> ACK the VF, non-zero -> NACK */

	/* Keep nic_poll_for_link() from writing this VF's mailbox while
	 * the request is being serviced.
	 */
	nic->mbx_lock[vf] = true;

	/* Copy the message out of the per-VF hardware mailbox registers */
	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		/* Only primary (LMAC-backed) VFs carry cached link state */
		if (vf < nic->num_vf_en) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		/* reply already sent; skip the generic ACK/NACK */
		goto unlock;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This gets appended by default on 81xx/83xx chips,
		 * for consistency enabling the same on 88xx pass2
		 * where this is introduced.
		 */
		if (pass2_silicon(nic->pdev))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		if (!pass1_silicon(nic->pdev))
			nic_enable_tunnel_parsing(nic, vf);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		/* Only primary VFs map to an LMAC whose MAC can be set */
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		/* Reply carries its own payload, not a plain ACK */
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	/* Generic completion: ACK on success, NACK otherwise (READY has
	 * already been answered above).
	 */
	if (!ret) {
		nic_mbx_send_ack(nic, vf);
	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
			mbx.msg.msg, vf);
		nic_mbx_send_nack(nic, vf);
	}
unlock:
	nic->mbx_lock[vf] = false;
}
1027 | ||
52358aad | 1028 | static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) |
4863dea3 | 1029 | { |
52358aad SG |
1030 | struct nicpf *nic = (struct nicpf *)nic_irq; |
1031 | int mbx; | |
4863dea3 SG |
1032 | u64 intr; |
1033 | u8 vf, vf_per_mbx_reg = 64; | |
1034 | ||
52358aad SG |
1035 | if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector) |
1036 | mbx = 0; | |
1037 | else | |
1038 | mbx = 1; | |
1039 | ||
4863dea3 SG |
1040 | intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); |
1041 | dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); | |
1042 | for (vf = 0; vf < vf_per_mbx_reg; vf++) { | |
1043 | if (intr & (1ULL << vf)) { | |
1044 | dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", | |
1045 | vf + (mbx * vf_per_mbx_reg)); | |
92dc8769 | 1046 | |
4863dea3 SG |
1047 | nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); |
1048 | nic_clear_mbx_intr(nic, vf, mbx); | |
1049 | } | |
1050 | } | |
4863dea3 SG |
1051 | return IRQ_HANDLED; |
1052 | } | |
1053 | ||
1054 | static int nic_enable_msix(struct nicpf *nic) | |
1055 | { | |
1056 | int i, ret; | |
1057 | ||
52358aad SG |
1058 | nic->num_vec = pci_msix_vec_count(nic->pdev); |
1059 | ||
1060 | nic->msix_entries = kmalloc_array(nic->num_vec, | |
1061 | sizeof(struct msix_entry), | |
1062 | GFP_KERNEL); | |
1063 | if (!nic->msix_entries) | |
1064 | return -ENOMEM; | |
4863dea3 SG |
1065 | |
1066 | for (i = 0; i < nic->num_vec; i++) | |
1067 | nic->msix_entries[i].entry = i; | |
1068 | ||
1069 | ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); | |
1070 | if (ret) { | |
1071 | dev_err(&nic->pdev->dev, | |
52358aad SG |
1072 | "Request for #%d msix vectors failed, returned %d\n", |
1073 | nic->num_vec, ret); | |
1074 | kfree(nic->msix_entries); | |
4863dea3 SG |
1075 | return ret; |
1076 | } | |
1077 | ||
1078 | nic->msix_enabled = 1; | |
1079 | return 0; | |
1080 | } | |
1081 | ||
1082 | static void nic_disable_msix(struct nicpf *nic) | |
1083 | { | |
1084 | if (nic->msix_enabled) { | |
1085 | pci_disable_msix(nic->pdev); | |
52358aad | 1086 | kfree(nic->msix_entries); |
4863dea3 SG |
1087 | nic->msix_enabled = 0; |
1088 | nic->num_vec = 0; | |
1089 | } | |
1090 | } | |
1091 | ||
1092 | static void nic_free_all_interrupts(struct nicpf *nic) | |
1093 | { | |
1094 | int irq; | |
1095 | ||
1096 | for (irq = 0; irq < nic->num_vec; irq++) { | |
1097 | if (nic->irq_allocated[irq]) | |
1098 | free_irq(nic->msix_entries[irq].vector, nic); | |
1099 | nic->irq_allocated[irq] = false; | |
1100 | } | |
1101 | } | |
1102 | ||
1103 | static int nic_register_interrupts(struct nicpf *nic) | |
1104 | { | |
52358aad | 1105 | int i, ret; |
4863dea3 SG |
1106 | |
1107 | /* Enable MSI-X */ | |
1108 | ret = nic_enable_msix(nic); | |
1109 | if (ret) | |
1110 | return ret; | |
1111 | ||
52358aad SG |
1112 | /* Register mailbox interrupt handler */ |
1113 | for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) { | |
1114 | sprintf(nic->irq_name[i], | |
1115 | "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); | |
4863dea3 | 1116 | |
52358aad SG |
1117 | ret = request_irq(nic->msix_entries[i].vector, |
1118 | nic_mbx_intr_handler, 0, | |
1119 | nic->irq_name[i], nic); | |
1120 | if (ret) | |
1121 | goto fail; | |
4863dea3 | 1122 | |
52358aad SG |
1123 | nic->irq_allocated[i] = true; |
1124 | } | |
4863dea3 SG |
1125 | |
1126 | /* Enable mailbox interrupt */ | |
1127 | nic_enable_mbx_intr(nic); | |
1128 | return 0; | |
1129 | ||
1130 | fail: | |
1131 | dev_err(&nic->pdev->dev, "Request irq failed\n"); | |
1132 | nic_free_all_interrupts(nic); | |
52358aad | 1133 | nic_disable_msix(nic); |
4863dea3 SG |
1134 | return ret; |
1135 | } | |
1136 | ||
/* Full interrupt teardown: free the IRQs first, then disable MSI-X. */
static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}
1142 | ||
92dc8769 SG |
1143 | static int nic_num_sqs_en(struct nicpf *nic, int vf_en) |
1144 | { | |
1145 | int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE; | |
1146 | u16 total_vf; | |
1147 | ||
3a397ebe SG |
1148 | /* Secondary Qsets are needed only if CPU count is |
1149 | * morethan MAX_QUEUES_PER_QSET. | |
1150 | */ | |
1151 | if (num_online_cpus() <= MAX_QUEUES_PER_QSET) | |
1152 | return 0; | |
1153 | ||
92dc8769 SG |
1154 | /* Check if its a multi-node environment */ |
1155 | if (nr_node_ids > 1) | |
1156 | sqs_per_vf = MAX_SQS_PER_VF; | |
1157 | ||
1158 | pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV); | |
1159 | pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf); | |
1160 | return min(total_vf - vf_en, vf_en * sqs_per_vf); | |
1161 | } | |
1162 | ||
4863dea3 SG |
1163 | static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) |
1164 | { | |
1165 | int pos = 0; | |
92dc8769 | 1166 | int vf_en; |
4863dea3 SG |
1167 | int err; |
1168 | u16 total_vf_cnt; | |
1169 | ||
1170 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | |
1171 | if (!pos) { | |
1172 | dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); | |
1173 | return -ENODEV; | |
1174 | } | |
1175 | ||
1176 | pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); | |
1177 | if (total_vf_cnt < nic->num_vf_en) | |
1178 | nic->num_vf_en = total_vf_cnt; | |
1179 | ||
1180 | if (!total_vf_cnt) | |
1181 | return 0; | |
1182 | ||
92dc8769 SG |
1183 | vf_en = nic->num_vf_en; |
1184 | nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en); | |
1185 | vf_en += nic->num_sqs_en; | |
1186 | ||
1187 | err = pci_enable_sriov(pdev, vf_en); | |
4863dea3 SG |
1188 | if (err) { |
1189 | dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", | |
92dc8769 | 1190 | vf_en); |
4863dea3 SG |
1191 | nic->num_vf_en = 0; |
1192 | return err; | |
1193 | } | |
1194 | ||
1195 | dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", | |
92dc8769 | 1196 | vf_en); |
4863dea3 SG |
1197 | |
1198 | nic->flags |= NIC_SRIOV_ENABLED; | |
1199 | return 0; | |
1200 | } | |
1201 | ||
1202 | /* Poll for BGX LMAC link status and update corresponding VF | |
1203 | * if there is a change, valid only if internal L2 switch | |
1204 | * is not present otherwise VF link is always treated as up | |
1205 | */ | |
1206 | static void nic_poll_for_link(struct work_struct *work) | |
1207 | { | |
1208 | union nic_mbx mbx = {}; | |
1209 | struct nicpf *nic; | |
1210 | struct bgx_link_status link; | |
1211 | u8 vf, bgx, lmac; | |
1212 | ||
1213 | nic = container_of(work, struct nicpf, dwork.work); | |
1214 | ||
1215 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | |
1216 | ||
f406ce42 | 1217 | for (vf = 0; vf < nic->num_vf_en; vf++) { |
4863dea3 SG |
1218 | /* Poll only if VF is UP */ |
1219 | if (!nic->vf_enabled[vf]) | |
1220 | continue; | |
1221 | ||
1222 | /* Get BGX, LMAC indices for the VF */ | |
1223 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
1224 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
1225 | /* Get interface link status */ | |
1226 | bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); | |
1227 | ||
1228 | /* Inform VF only if link status changed */ | |
1229 | if (nic->link[vf] == link.link_up) | |
1230 | continue; | |
1231 | ||
1232 | if (!nic->mbx_lock[vf]) { | |
1233 | nic->link[vf] = link.link_up; | |
1234 | nic->duplex[vf] = link.duplex; | |
1235 | nic->speed[vf] = link.speed; | |
1236 | ||
1237 | /* Send a mbox message to VF with current link status */ | |
1238 | mbx.link_status.link_up = link.link_up; | |
1239 | mbx.link_status.duplex = link.duplex; | |
1240 | mbx.link_status.speed = link.speed; | |
1241 | nic_send_msg_to_vf(nic, vf, &mbx); | |
1242 | } | |
1243 | } | |
1244 | queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); | |
1245 | } | |
1246 | ||
/* PCI probe for the PF: enable the device, map BAR0, initialize the
 * hardware, set up VF<->LMAC mapping, interrupts, SR-IOV and start the
 * link-poll worker.  Unwinds everything on failure via the goto chain.
 */
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	/* Mailbox messages must fit in the 128-byte HW mailbox window */
	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
	if (!nic->hw) {
		devm_kfree(dev, nic);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Device uses 48-bit DMA addresses for both streaming and
	 * coherent mappings.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	/* Initialize hardware */
	err = nic_init_hw(nic);
	if (err)
		goto err_release_regions;

	nic_set_lmac_vf_mapping(nic);

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	nic_free_lmacmem(nic);
	devm_kfree(dev, nic->hw);
	devm_kfree(dev, nic);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1349 | ||
/* PCI remove: tear down SR-IOV, the link-poll worker, interrupts and
 * device resources — the reverse of nic_probe().
 */
static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work Queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);

	nic_free_lmacmem(nic);
	devm_kfree(&pdev->dev, nic->hw);
	devm_kfree(&pdev->dev, nic);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1373 | ||
/* PCI driver glue for the Thunder NIC physical function */
static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};
1380 | ||
1381 | static int __init nic_init_module(void) | |
1382 | { | |
1383 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | |
1384 | ||
1385 | return pci_register_driver(&nic_driver); | |
1386 | } | |
1387 | ||
1388 | static void __exit nic_cleanup_module(void) | |
1389 | { | |
1390 | pci_unregister_driver(&nic_driver); | |
1391 | } | |
1392 | ||
/* Hook module load/unload into the kernel */
module_init(nic_init_module);
module_exit(nic_cleanup_module);