2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * Netronome network device driver: Main entry point
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Alejandro Lucero <alejandro.lucero@netronome.com>
39 * Jason McMullan <jason.mcmullan@netronome.com>
40 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
43 #include <linux/etherdevice.h>
44 #include <linux/kernel.h>
45 #include <linux/init.h>
46 #include <linux/pci.h>
47 #include <linux/pci_regs.h>
48 #include <linux/msi.h>
49 #include <linux/random.h>
51 #include "nfpcore/nfp.h"
52 #include "nfpcore/nfp_cpp.h"
53 #include "nfpcore/nfp_nffw.h"
54 #include "nfpcore/nfp_nsp_eth.h"
55 #include "nfpcore/nfp6000_pcie.h"
57 #include "nfp_net_ctrl.h"
61 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
/* nfp_is_ready() - Check whether the board has finished initialization.
 * @cpp:	NFP CPP handle
 *
 * Reads the "board.state" HWInfo key and parses it as an integer.
 * Return: 1 if the board reports the fully-initialized state (15),
 * 0 on lookup/parse failure or any other state.
 */
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	/* State 15 means "ready for NIC operation" on this hardware */
	return state == 15;
}
81 * nfp_net_map_area() - Help function to map an area
82 * @cpp: NFP CPP handler
83 * @name: Name for the area
86 * @size: Size of the area
87 * @area: Area handle (returned).
89 * This function is primarily to simplify the code in the main probe
90 * function. To undo the effect of this functions call
91 * @nfp_cpp_area_release_free(*area);
93 * Return: Pointer to memory mapped area or ERR_PTR
95 static u8 __iomem
*nfp_net_map_area(struct nfp_cpp
*cpp
,
96 const char *name
, int isl
, int target
,
97 unsigned long long addr
, unsigned long size
,
98 struct nfp_cpp_area
**area
)
104 dest
= NFP_CPP_ISLAND_ID(target
, NFP_CPP_ACTION_RW
, 0, isl
);
106 *area
= nfp_cpp_area_alloc_with_name(cpp
, dest
, name
, addr
, size
);
112 err
= nfp_cpp_area_acquire(*area
);
116 res
= nfp_cpp_area_iomem(*area
);
125 nfp_cpp_area_release(*area
);
127 nfp_cpp_area_free(*area
);
129 return (u8 __iomem
*)ERR_PTR(err
);
133 nfp_net_get_mac_addr_hwinfo(struct nfp_net
*nn
, struct nfp_cpp
*cpp
,
136 u8 mac_addr
[ETH_ALEN
];
140 snprintf(name
, sizeof(name
), "eth%d.mac", id
);
142 mac_str
= nfp_hwinfo_lookup(cpp
, name
);
144 dev_warn(&nn
->pdev
->dev
,
145 "Can't lookup MAC address. Generate\n");
146 eth_hw_addr_random(nn
->netdev
);
150 if (sscanf(mac_str
, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
151 &mac_addr
[0], &mac_addr
[1], &mac_addr
[2],
152 &mac_addr
[3], &mac_addr
[4], &mac_addr
[5]) != 6) {
153 dev_warn(&nn
->pdev
->dev
,
154 "Can't parse MAC address (%s). Generate.\n", mac_str
);
155 eth_hw_addr_random(nn
->netdev
);
159 ether_addr_copy(nn
->netdev
->dev_addr
, mac_addr
);
160 ether_addr_copy(nn
->netdev
->perm_addr
, mac_addr
);
164 * nfp_net_get_mac_addr() - Get the MAC address.
165 * @nn: NFP Network structure
166 * @pf: NFP PF device structure
169 * First try to get the MAC address from NSP ETH table. If that
170 * fails try HWInfo. As a last resort generate a random address.
173 nfp_net_get_mac_addr(struct nfp_net
*nn
, struct nfp_pf
*pf
, unsigned int id
)
177 for (i
= 0; pf
->eth_tbl
&& i
< pf
->eth_tbl
->count
; i
++)
178 if (pf
->eth_tbl
->ports
[i
].eth_index
== id
) {
179 const u8
*mac_addr
= pf
->eth_tbl
->ports
[i
].mac_addr
;
181 nn
->eth_port
= &pf
->eth_tbl
->ports
[i
];
183 ether_addr_copy(nn
->netdev
->dev_addr
, mac_addr
);
184 ether_addr_copy(nn
->netdev
->perm_addr
, mac_addr
);
188 nfp_net_get_mac_addr_hwinfo(nn
, pf
->cpp
, id
);
191 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf
*pf
)
199 interface
= nfp_cpp_interface(pf
->cpp
);
200 pcie_pf
= NFP_CPP_INTERFACE_UNIT_of(interface
);
202 snprintf(name
, sizeof(name
), "nfd_cfg_pf%d_num_ports", pcie_pf
);
204 val
= nfp_rtsym_read_le(pf
->cpp
, name
, &err
);
205 /* Default to one port */
208 nfp_err(pf
->cpp
, "Unable to read adapter port count\n");
216 nfp_net_pf_total_qcs(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
217 unsigned int stride
, u32 start_off
, u32 num_off
)
219 unsigned int i
, min_qc
, max_qc
;
221 min_qc
= readl(ctrl_bar
+ start_off
);
224 for (i
= 0; i
< pf
->num_ports
; i
++) {
225 /* To make our lives simpler only accept configuration where
226 * queues are allocated to PFs in order (queues of PFn all have
227 * indexes lower than PFn+1).
229 if (max_qc
> readl(ctrl_bar
+ start_off
))
232 max_qc
= readl(ctrl_bar
+ start_off
);
233 max_qc
+= readl(ctrl_bar
+ num_off
) * stride
;
234 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
237 return max_qc
- min_qc
;
240 static u8 __iomem
*nfp_net_pf_map_ctrl_bar(struct nfp_pf
*pf
)
242 const struct nfp_rtsym
*ctrl_sym
;
243 u8 __iomem
*ctrl_bar
;
248 interface
= nfp_cpp_interface(pf
->cpp
);
249 pcie_pf
= NFP_CPP_INTERFACE_UNIT_of(interface
);
251 snprintf(pf_symbol
, sizeof(pf_symbol
), "_pf%d_net_bar0", pcie_pf
);
253 ctrl_sym
= nfp_rtsym_lookup(pf
->cpp
, pf_symbol
);
255 dev_err(&pf
->pdev
->dev
,
256 "Failed to find PF BAR0 symbol %s\n", pf_symbol
);
260 if (ctrl_sym
->size
< pf
->num_ports
* NFP_PF_CSR_SLICE_SIZE
) {
261 dev_err(&pf
->pdev
->dev
,
262 "PF BAR0 too small to contain %d ports\n",
267 ctrl_bar
= nfp_net_map_area(pf
->cpp
, "net.ctrl",
268 ctrl_sym
->domain
, ctrl_sym
->target
,
269 ctrl_sym
->addr
, ctrl_sym
->size
,
271 if (IS_ERR(ctrl_bar
)) {
272 dev_err(&pf
->pdev
->dev
, "Failed to map PF BAR0: %ld\n",
280 static void nfp_net_pf_free_netdevs(struct nfp_pf
*pf
)
284 while (!list_empty(&pf
->ports
)) {
285 nn
= list_first_entry(&pf
->ports
, struct nfp_net
, port_list
);
286 list_del(&nn
->port_list
);
288 nfp_net_netdev_free(nn
);
292 static struct nfp_net
*
293 nfp_net_pf_alloc_port_netdev(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
294 void __iomem
*tx_bar
, void __iomem
*rx_bar
,
295 int stride
, struct nfp_net_fw_version
*fw_ver
)
297 u32 n_tx_rings
, n_rx_rings
;
300 n_tx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_TXRINGS
);
301 n_rx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_RXRINGS
);
303 /* Allocate and initialise the netdev */
304 nn
= nfp_net_netdev_alloc(pf
->pdev
, n_tx_rings
, n_rx_rings
);
309 nn
->fw_ver
= *fw_ver
;
310 nn
->ctrl_bar
= ctrl_bar
;
314 nn
->stride_rx
= stride
;
315 nn
->stride_tx
= stride
;
321 nfp_net_pf_init_port_netdev(struct nfp_pf
*pf
, struct nfp_net
*nn
,
326 /* Get MAC address */
327 nfp_net_get_mac_addr(nn
, pf
, id
);
329 /* Get ME clock frequency from ctrl BAR
330 * XXX for now frequency is hardcoded until we figure out how
331 * to get the value from nfp-hwinfo into ctrl bar
333 nn
->me_freq_mhz
= 1200;
335 err
= nfp_net_netdev_init(nn
->netdev
);
339 nfp_net_debugfs_port_add(nn
, pf
->ddir
, id
);
347 nfp_net_pf_alloc_netdevs(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
348 void __iomem
*tx_bar
, void __iomem
*rx_bar
,
349 int stride
, struct nfp_net_fw_version
*fw_ver
)
351 u32 prev_tx_base
, prev_rx_base
, tgt_tx_base
, tgt_rx_base
;
356 prev_tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
357 prev_rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
359 for (i
= 0; i
< pf
->num_ports
; i
++) {
360 tgt_tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
361 tgt_rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
362 tx_bar
+= (tgt_tx_base
- prev_tx_base
) * NFP_QCP_QUEUE_ADDR_SZ
;
363 rx_bar
+= (tgt_rx_base
- prev_rx_base
) * NFP_QCP_QUEUE_ADDR_SZ
;
364 prev_tx_base
= tgt_tx_base
;
365 prev_rx_base
= tgt_rx_base
;
367 nn
= nfp_net_pf_alloc_port_netdev(pf
, ctrl_bar
, tx_bar
, rx_bar
,
373 list_add_tail(&nn
->port_list
, &pf
->ports
);
375 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
381 nfp_net_pf_free_netdevs(pf
);
386 nfp_net_pf_spawn_netdevs(struct nfp_pf
*pf
,
387 void __iomem
*ctrl_bar
, void __iomem
*tx_bar
,
388 void __iomem
*rx_bar
, int stride
,
389 struct nfp_net_fw_version
*fw_ver
)
391 unsigned int id
, wanted_irqs
, num_irqs
, ports_left
, irqs_left
;
395 /* Allocate the netdevs and do basic init */
396 err
= nfp_net_pf_alloc_netdevs(pf
, ctrl_bar
, tx_bar
, rx_bar
,
401 /* Get MSI-X vectors */
403 list_for_each_entry(nn
, &pf
->ports
, port_list
)
404 wanted_irqs
+= NFP_NET_NON_Q_VECTORS
+ nn
->num_r_vecs
;
405 pf
->irq_entries
= kcalloc(wanted_irqs
, sizeof(*pf
->irq_entries
),
407 if (!pf
->irq_entries
) {
412 num_irqs
= nfp_net_irqs_alloc(pf
->pdev
, pf
->irq_entries
,
413 NFP_NET_MIN_PORT_IRQS
* pf
->num_ports
,
416 nn_warn(nn
, "Unable to allocate MSI-X Vectors. Exiting\n");
421 /* Distribute IRQs to ports */
422 irqs_left
= num_irqs
;
423 ports_left
= pf
->num_ports
;
424 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
427 n
= DIV_ROUND_UP(irqs_left
, ports_left
);
428 nfp_net_irqs_assign(nn
, &pf
->irq_entries
[num_irqs
- irqs_left
],
434 /* Finish netdev init and register */
436 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
437 err
= nfp_net_pf_init_port_netdev(pf
, nn
, id
);
439 goto err_prev_deinit
;
447 list_for_each_entry_continue_reverse(nn
, &pf
->ports
, port_list
) {
448 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
449 nfp_net_netdev_clean(nn
->netdev
);
451 nfp_net_irqs_disable(pf
->pdev
);
453 kfree(pf
->irq_entries
);
455 nfp_net_pf_free_netdevs(pf
);
460 * PCI device functions
462 int nfp_net_pci_probe(struct nfp_pf
*pf
)
464 u8 __iomem
*ctrl_bar
, *tx_bar
, *rx_bar
;
465 u32 total_tx_qcs
, total_rx_qcs
;
466 struct nfp_net_fw_version fw_ver
;
467 u32 tx_area_sz
, rx_area_sz
;
472 /* Verify that the board has completed initialization */
473 if (!nfp_is_ready(pf
->cpp
)) {
474 nfp_err(pf
->cpp
, "NFP is not ready for NIC operation.\n");
478 pf
->num_ports
= nfp_net_pf_get_num_ports(pf
);
480 ctrl_bar
= nfp_net_pf_map_ctrl_bar(pf
);
482 return pf
->fw_loaded
? -EINVAL
: -EPROBE_DEFER
;
484 nfp_net_get_fw_version(&fw_ver
, ctrl_bar
);
485 if (fw_ver
.resv
|| fw_ver
.class != NFP_NET_CFG_VERSION_CLASS_GENERIC
) {
486 nfp_err(pf
->cpp
, "Unknown Firmware ABI %d.%d.%d.%d\n",
487 fw_ver
.resv
, fw_ver
.class, fw_ver
.major
, fw_ver
.minor
);
492 /* Determine stride */
493 if (nfp_net_fw_ver_eq(&fw_ver
, 0, 0, 0, 1)) {
495 nfp_warn(pf
->cpp
, "OBSOLETE Firmware detected - VF isolation not available\n");
497 switch (fw_ver
.major
) {
502 nfp_err(pf
->cpp
, "Unsupported Firmware ABI %d.%d.%d.%d\n",
503 fw_ver
.resv
, fw_ver
.class,
504 fw_ver
.major
, fw_ver
.minor
);
510 /* Find how many QC structs need to be mapped */
511 total_tx_qcs
= nfp_net_pf_total_qcs(pf
, ctrl_bar
, stride
,
512 NFP_NET_CFG_START_TXQ
,
513 NFP_NET_CFG_MAX_TXRINGS
);
514 total_rx_qcs
= nfp_net_pf_total_qcs(pf
, ctrl_bar
, stride
,
515 NFP_NET_CFG_START_RXQ
,
516 NFP_NET_CFG_MAX_RXRINGS
);
517 if (!total_tx_qcs
|| !total_rx_qcs
) {
518 nfp_err(pf
->cpp
, "Invalid PF QC configuration [%d,%d]\n",
519 total_tx_qcs
, total_rx_qcs
);
524 tx_area_sz
= NFP_QCP_QUEUE_ADDR_SZ
* total_tx_qcs
;
525 rx_area_sz
= NFP_QCP_QUEUE_ADDR_SZ
* total_rx_qcs
;
528 start_q
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
529 tx_bar
= nfp_net_map_area(pf
->cpp
, "net.tx", 0, 0,
530 NFP_PCIE_QUEUE(start_q
),
531 tx_area_sz
, &pf
->tx_area
);
532 if (IS_ERR(tx_bar
)) {
533 nfp_err(pf
->cpp
, "Failed to map TX area.\n");
534 err
= PTR_ERR(tx_bar
);
539 start_q
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
540 rx_bar
= nfp_net_map_area(pf
->cpp
, "net.rx", 0, 0,
541 NFP_PCIE_QUEUE(start_q
),
542 rx_area_sz
, &pf
->rx_area
);
543 if (IS_ERR(rx_bar
)) {
544 nfp_err(pf
->cpp
, "Failed to map RX area.\n");
545 err
= PTR_ERR(rx_bar
);
549 pf
->ddir
= nfp_net_debugfs_device_add(pf
->pdev
);
551 err
= nfp_net_pf_spawn_netdevs(pf
, ctrl_bar
, tx_bar
, rx_bar
,
559 nfp_net_debugfs_dir_clean(&pf
->ddir
);
560 nfp_cpp_area_release_free(pf
->rx_area
);
562 nfp_cpp_area_release_free(pf
->tx_area
);
564 nfp_cpp_area_release_free(pf
->ctrl_area
);
568 void nfp_net_pci_remove(struct nfp_pf
*pf
)
572 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
573 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
575 nfp_net_netdev_clean(nn
->netdev
);
578 nfp_net_pf_free_netdevs(pf
);
580 nfp_net_debugfs_dir_clean(&pf
->ddir
);
582 nfp_net_irqs_disable(pf
->pdev
);
583 kfree(pf
->irq_entries
);
585 nfp_cpp_area_release_free(pf
->rx_area
);
586 nfp_cpp_area_release_free(pf
->tx_area
);
587 nfp_cpp_area_release_free(pf
->ctrl_area
);