]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/netronome/nfp/nfp_net_main.c
4d602b1ddc90ea31f433b013cd22bfff3d86f9ac
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / netronome / nfp / nfp_net_main.c
1 /*
2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 /*
35 * nfp_net_main.c
36 * Netronome network device driver: Main entry point
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Alejandro Lucero <alejandro.lucero@netronome.com>
39 * Jason McMullan <jason.mcmullan@netronome.com>
40 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
41 */
42
43 #include <linux/etherdevice.h>
44 #include <linux/kernel.h>
45 #include <linux/init.h>
46 #include <linux/pci.h>
47 #include <linux/pci_regs.h>
48 #include <linux/msi.h>
49 #include <linux/random.h>
50
51 #include "nfpcore/nfp.h"
52 #include "nfpcore/nfp_cpp.h"
53 #include "nfpcore/nfp_nffw.h"
54 #include "nfpcore/nfp_nsp_eth.h"
55 #include "nfpcore/nfp6000_pcie.h"
56
57 #include "nfp_net_ctrl.h"
58 #include "nfp_net.h"
59 #include "nfp_main.h"
60
61 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
62
/* Check the "board.state" HWInfo key; the board is only ready for
 * NIC operation once it reports state 15.
 */
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *board_state;
	long parsed;

	board_state = nfp_hwinfo_lookup(cpp, "board.state");
	if (!board_state)
		return 0;

	if (kstrtol(board_state, 0, &parsed) < 0)
		return 0;

	return parsed == 15;
}
79
/**
 * nfp_net_map_area() - Help function to map an area
 * @cpp:    NFP CPP handler
 * @name:   Name for the area
 * @isl:    Island ID to address the area on
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily to simplify the code in the main probe
 * function. To undo the effect of this functions call
 * @nfp_cpp_area_release_free(*area);
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	/* Build the CPP ID addressing the requested target/island pair */
	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
131
132 /**
133 * nfp_net_get_mac_addr() - Get the MAC address.
134 * @nn: NFP Network structure
135 * @cpp: NFP CPP handle
136 * @id: NFP port id
137 *
138 * First try to get the MAC address from NSP ETH table. If that
139 * fails try HWInfo. As a last resort generate a random address.
140 */
141 static void
142 nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
143 {
144 struct nfp_net_dp *dp = &nn->dp;
145 u8 mac_addr[ETH_ALEN];
146 const char *mac_str;
147 char name[32];
148
149 if (nn->eth_port) {
150 ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
151 ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
152 return;
153 }
154
155 snprintf(name, sizeof(name), "eth%d.mac", id);
156
157 mac_str = nfp_hwinfo_lookup(cpp, name);
158 if (!mac_str) {
159 dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
160 eth_hw_addr_random(dp->netdev);
161 return;
162 }
163
164 if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
165 &mac_addr[0], &mac_addr[1], &mac_addr[2],
166 &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
167 dev_warn(dp->dev,
168 "Can't parse MAC address (%s). Generate.\n", mac_str);
169 eth_hw_addr_random(dp->netdev);
170 return;
171 }
172
173 ether_addr_copy(dp->netdev->dev_addr, mac_addr);
174 ether_addr_copy(dp->netdev->perm_addr, mac_addr);
175 }
176
177 static struct nfp_eth_table_port *
178 nfp_net_find_port(struct nfp_pf *pf, unsigned int id)
179 {
180 int i;
181
182 for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
183 if (pf->eth_tbl->ports[i].eth_index == id)
184 return &pf->eth_tbl->ports[i];
185
186 return NULL;
187 }
188
189 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
190 {
191 char name[256];
192 u16 interface;
193 int pcie_pf;
194 int err = 0;
195 u64 val;
196
197 interface = nfp_cpp_interface(pf->cpp);
198 pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
199
200 snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
201
202 val = nfp_rtsym_read_le(pf->cpp, name, &err);
203 /* Default to one port */
204 if (err) {
205 if (err != -ENOENT)
206 nfp_err(pf->cpp, "Unable to read adapter port count\n");
207 val = 1;
208 }
209
210 return val;
211 }
212
/* Compute the total span of queue controller (QC) structs used by all
 * vNICs of this PF.  Each vNIC's control BAR slice advertises its first
 * queue index (at @start_off) and ring count (at @num_off); the span is
 * the distance from the first vNIC's base queue to the end of the last
 * vNIC's queues, scaled by @stride.  Returns 0 when queues are not laid
 * out in increasing order, signalling an unsupported configuration.
 */
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	/* The first vNIC's base queue index is the global minimum */
	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configuration where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		/* Advance to the next vNIC's control BAR slice */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}
237
238 static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
239 {
240 const struct nfp_rtsym *ctrl_sym;
241 u8 __iomem *ctrl_bar;
242 char pf_symbol[256];
243 u16 interface;
244 int pcie_pf;
245
246 interface = nfp_cpp_interface(pf->cpp);
247 pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
248
249 snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
250
251 ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
252 if (!ctrl_sym) {
253 dev_err(&pf->pdev->dev,
254 "Failed to find PF BAR0 symbol %s\n", pf_symbol);
255 return NULL;
256 }
257
258 if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
259 dev_err(&pf->pdev->dev,
260 "PF BAR0 too small to contain %d ports\n",
261 pf->num_ports);
262 return NULL;
263 }
264
265 ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
266 ctrl_sym->domain, ctrl_sym->target,
267 ctrl_sym->addr, ctrl_sym->size,
268 &pf->ctrl_area);
269 if (IS_ERR(ctrl_bar)) {
270 dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
271 PTR_ERR(ctrl_bar));
272 return NULL;
273 }
274
275 return ctrl_bar;
276 }
277
278 static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
279 {
280 struct nfp_net *nn;
281
282 while (!list_empty(&pf->ports)) {
283 nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
284 list_del(&nn->port_list);
285 pf->num_netdevs--;
286
287 nfp_net_netdev_free(nn);
288 }
289 }
290
291 static struct nfp_net *
292 nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
293 void __iomem *tx_bar, void __iomem *rx_bar,
294 int stride, struct nfp_net_fw_version *fw_ver,
295 struct nfp_eth_table_port *eth_port)
296 {
297 u32 n_tx_rings, n_rx_rings;
298 struct nfp_net *nn;
299
300 n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
301 n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
302
303 /* Allocate and initialise the netdev */
304 nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
305 if (IS_ERR(nn))
306 return nn;
307
308 nn->cpp = pf->cpp;
309 nn->fw_ver = *fw_ver;
310 nn->dp.ctrl_bar = ctrl_bar;
311 nn->tx_bar = tx_bar;
312 nn->rx_bar = rx_bar;
313 nn->dp.is_vf = 0;
314 nn->stride_rx = stride;
315 nn->stride_tx = stride;
316 nn->eth_port = eth_port;
317
318 return nn;
319 }
320
321 static int
322 nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
323 unsigned int id)
324 {
325 int err;
326
327 /* Get MAC address */
328 nfp_net_get_mac_addr(nn, pf->cpp, id);
329
330 /* Get ME clock frequency from ctrl BAR
331 * XXX for now frequency is hardcoded until we figure out how
332 * to get the value from nfp-hwinfo into ctrl bar
333 */
334 nn->me_freq_mhz = 1200;
335
336 err = nfp_net_netdev_init(nn->dp.netdev);
337 if (err)
338 return err;
339
340 nfp_net_debugfs_port_add(nn, pf->ddir, id);
341
342 nfp_net_info(nn);
343
344 return 0;
345 }
346
347 static int
348 nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
349 void __iomem *tx_bar, void __iomem *rx_bar,
350 int stride, struct nfp_net_fw_version *fw_ver)
351 {
352 u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
353 struct nfp_eth_table_port *eth_port;
354 struct nfp_net *nn;
355 unsigned int i;
356 int err;
357
358 prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
359 prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
360
361 for (i = 0; i < pf->num_ports; i++) {
362 tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
363 tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
364 tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
365 rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
366 prev_tx_base = tgt_tx_base;
367 prev_rx_base = tgt_rx_base;
368
369 eth_port = nfp_net_find_port(pf, i);
370 if (eth_port && eth_port->override_changed) {
371 nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
372 } else {
373 nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
374 rx_bar, stride,
375 fw_ver, eth_port);
376 if (IS_ERR(nn)) {
377 err = PTR_ERR(nn);
378 goto err_free_prev;
379 }
380 list_add_tail(&nn->port_list, &pf->ports);
381 pf->num_netdevs++;
382 }
383
384 ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
385 }
386
387 if (list_empty(&pf->ports))
388 return -ENODEV;
389
390 return 0;
391
392 err_free_prev:
393 nfp_net_pf_free_netdevs(pf);
394 return err;
395 }
396
397 static int
398 nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
399 void __iomem *ctrl_bar, void __iomem *tx_bar,
400 void __iomem *rx_bar, int stride,
401 struct nfp_net_fw_version *fw_ver)
402 {
403 unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
404 struct nfp_net *nn;
405 int err;
406
407 /* Allocate the netdevs and do basic init */
408 err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
409 stride, fw_ver);
410 if (err)
411 return err;
412
413 /* Get MSI-X vectors */
414 wanted_irqs = 0;
415 list_for_each_entry(nn, &pf->ports, port_list)
416 wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
417 pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
418 GFP_KERNEL);
419 if (!pf->irq_entries) {
420 err = -ENOMEM;
421 goto err_nn_free;
422 }
423
424 num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
425 NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
426 wanted_irqs);
427 if (!num_irqs) {
428 nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
429 err = -ENOMEM;
430 goto err_vec_free;
431 }
432
433 /* Distribute IRQs to ports */
434 irqs_left = num_irqs;
435 ports_left = pf->num_netdevs;
436 list_for_each_entry(nn, &pf->ports, port_list) {
437 unsigned int n;
438
439 n = DIV_ROUND_UP(irqs_left, ports_left);
440 nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
441 n);
442 irqs_left -= n;
443 ports_left--;
444 }
445
446 /* Finish netdev init and register */
447 id = 0;
448 list_for_each_entry(nn, &pf->ports, port_list) {
449 err = nfp_net_pf_init_port_netdev(pf, nn, id);
450 if (err)
451 goto err_prev_deinit;
452
453 id++;
454 }
455
456 return 0;
457
458 err_prev_deinit:
459 list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
460 nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
461 nfp_net_netdev_clean(nn->dp.netdev);
462 }
463 nfp_net_irqs_disable(pf->pdev);
464 err_vec_free:
465 kfree(pf->irq_entries);
466 err_nn_free:
467 nfp_net_pf_free_netdevs(pf);
468 return err;
469 }
470
/*
 * PCI device functions
 */

/**
 * nfp_net_pci_probe() - Bring up all vNICs of a PF device
 * @pf:	NFP PF device structure
 *
 * Validates board state and firmware ABI, maps the control BAR and the
 * TX/RX queue controller areas, then spawns one netdev per port.
 *
 * Return: 0 on success, negative errno on failure (-EPROBE_DEFER when
 * no firmware has been loaded yet).
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	mutex_init(&pf->port_lock);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	mutex_lock(&pf->port_lock);
	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar) {
		/* Without firmware the BAR0 symbol cannot exist yet -
		 * defer the probe until firmware may have been loaded
		 */
		err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
		goto err_unlock;
	}

	/* Only the generic firmware ABI class is supported here */
	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	mutex_unlock(&pf->port_lock);

	return 0;

	/* Unwind in reverse order of acquisition */
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
err_unlock:
	mutex_unlock(&pf->port_lock);
	return err;
}
588
/**
 * nfp_net_pci_remove() - Tear down all vNICs of a PF device
 * @pf:	NFP PF device structure
 *
 * Reverses nfp_net_pci_probe(): unregisters and frees the port
 * netdevs, disables the IRQ vectors and releases the mapped CPP areas.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->port_lock);
	/* Probe never spawned any ports - nothing to clean up */
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->dp.netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	/* Release areas in reverse order of mapping */
	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
out:
	mutex_unlock(&pf->port_lock);
}