/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */
43 #include <linux/etherdevice.h>
44 #include <linux/kernel.h>
45 #include <linux/init.h>
46 #include <linux/pci.h>
47 #include <linux/pci_regs.h>
48 #include <linux/msi.h>
49 #include <linux/random.h>
51 #include "nfpcore/nfp.h"
52 #include "nfpcore/nfp_cpp.h"
53 #include "nfpcore/nfp_nffw.h"
54 #include "nfpcore/nfp_nsp_eth.h"
55 #include "nfpcore/nfp6000_pcie.h"
57 #include "nfp_net_ctrl.h"
61 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
/* nfp_is_ready() - check whether the NFP board finished initialization.
 *
 * Reads the "board.state" HWInfo key and parses it as an integer.
 * Returns non-zero (true) only when the board reports state 15
 * (fully initialized); any lookup or parse failure counts as not ready.
 */
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	/* 15 == NFP_BOARD_STATE_READY; anything else means not usable yet */
	return state == 15;
}
81 * nfp_net_map_area() - Help function to map an area
82 * @cpp: NFP CPP handler
83 * @name: Name for the area
86 * @size: Size of the area
87 * @area: Area handle (returned).
89 * This function is primarily to simplify the code in the main probe
90 * function. To undo the effect of this functions call
91 * @nfp_cpp_area_release_free(*area);
93 * Return: Pointer to memory mapped area or ERR_PTR
95 static u8 __iomem
*nfp_net_map_area(struct nfp_cpp
*cpp
,
96 const char *name
, int isl
, int target
,
97 unsigned long long addr
, unsigned long size
,
98 struct nfp_cpp_area
**area
)
104 dest
= NFP_CPP_ISLAND_ID(target
, NFP_CPP_ACTION_RW
, 0, isl
);
106 *area
= nfp_cpp_area_alloc_with_name(cpp
, dest
, name
, addr
, size
);
112 err
= nfp_cpp_area_acquire(*area
);
116 res
= nfp_cpp_area_iomem(*area
);
125 nfp_cpp_area_release(*area
);
127 nfp_cpp_area_free(*area
);
129 return (u8 __iomem
*)ERR_PTR(err
);
133 * nfp_net_get_mac_addr() - Get the MAC address.
134 * @nn: NFP Network structure
135 * @cpp: NFP CPP handle
138 * First try to get the MAC address from NSP ETH table. If that
139 * fails try HWInfo. As a last resort generate a random address.
142 nfp_net_get_mac_addr(struct nfp_net
*nn
, struct nfp_cpp
*cpp
, unsigned int id
)
144 struct nfp_net_dp
*dp
= &nn
->dp
;
145 u8 mac_addr
[ETH_ALEN
];
150 ether_addr_copy(dp
->netdev
->dev_addr
, nn
->eth_port
->mac_addr
);
151 ether_addr_copy(dp
->netdev
->perm_addr
, nn
->eth_port
->mac_addr
);
155 snprintf(name
, sizeof(name
), "eth%d.mac", id
);
157 mac_str
= nfp_hwinfo_lookup(cpp
, name
);
159 dev_warn(dp
->dev
, "Can't lookup MAC address. Generate\n");
160 eth_hw_addr_random(dp
->netdev
);
164 if (sscanf(mac_str
, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
165 &mac_addr
[0], &mac_addr
[1], &mac_addr
[2],
166 &mac_addr
[3], &mac_addr
[4], &mac_addr
[5]) != 6) {
168 "Can't parse MAC address (%s). Generate.\n", mac_str
);
169 eth_hw_addr_random(dp
->netdev
);
173 ether_addr_copy(dp
->netdev
->dev_addr
, mac_addr
);
174 ether_addr_copy(dp
->netdev
->perm_addr
, mac_addr
);
177 static struct nfp_eth_table_port
*
178 nfp_net_find_port(struct nfp_pf
*pf
, unsigned int id
)
182 for (i
= 0; pf
->eth_tbl
&& i
< pf
->eth_tbl
->count
; i
++)
183 if (pf
->eth_tbl
->ports
[i
].eth_index
== id
)
184 return &pf
->eth_tbl
->ports
[i
];
189 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf
*pf
)
197 interface
= nfp_cpp_interface(pf
->cpp
);
198 pcie_pf
= NFP_CPP_INTERFACE_UNIT_of(interface
);
200 snprintf(name
, sizeof(name
), "nfd_cfg_pf%d_num_ports", pcie_pf
);
202 val
= nfp_rtsym_read_le(pf
->cpp
, name
, &err
);
203 /* Default to one port */
206 nfp_err(pf
->cpp
, "Unable to read adapter port count\n");
214 nfp_net_pf_total_qcs(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
215 unsigned int stride
, u32 start_off
, u32 num_off
)
217 unsigned int i
, min_qc
, max_qc
;
219 min_qc
= readl(ctrl_bar
+ start_off
);
222 for (i
= 0; i
< pf
->num_ports
; i
++) {
223 /* To make our lives simpler only accept configuration where
224 * queues are allocated to PFs in order (queues of PFn all have
225 * indexes lower than PFn+1).
227 if (max_qc
> readl(ctrl_bar
+ start_off
))
230 max_qc
= readl(ctrl_bar
+ start_off
);
231 max_qc
+= readl(ctrl_bar
+ num_off
) * stride
;
232 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
235 return max_qc
- min_qc
;
238 static u8 __iomem
*nfp_net_pf_map_ctrl_bar(struct nfp_pf
*pf
)
240 const struct nfp_rtsym
*ctrl_sym
;
241 u8 __iomem
*ctrl_bar
;
246 interface
= nfp_cpp_interface(pf
->cpp
);
247 pcie_pf
= NFP_CPP_INTERFACE_UNIT_of(interface
);
249 snprintf(pf_symbol
, sizeof(pf_symbol
), "_pf%d_net_bar0", pcie_pf
);
251 ctrl_sym
= nfp_rtsym_lookup(pf
->cpp
, pf_symbol
);
253 dev_err(&pf
->pdev
->dev
,
254 "Failed to find PF BAR0 symbol %s\n", pf_symbol
);
258 if (ctrl_sym
->size
< pf
->num_ports
* NFP_PF_CSR_SLICE_SIZE
) {
259 dev_err(&pf
->pdev
->dev
,
260 "PF BAR0 too small to contain %d ports\n",
265 ctrl_bar
= nfp_net_map_area(pf
->cpp
, "net.ctrl",
266 ctrl_sym
->domain
, ctrl_sym
->target
,
267 ctrl_sym
->addr
, ctrl_sym
->size
,
269 if (IS_ERR(ctrl_bar
)) {
270 dev_err(&pf
->pdev
->dev
, "Failed to map PF BAR0: %ld\n",
278 static void nfp_net_pf_free_netdevs(struct nfp_pf
*pf
)
282 while (!list_empty(&pf
->ports
)) {
283 nn
= list_first_entry(&pf
->ports
, struct nfp_net
, port_list
);
284 list_del(&nn
->port_list
);
287 nfp_net_netdev_free(nn
);
291 static struct nfp_net
*
292 nfp_net_pf_alloc_port_netdev(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
293 void __iomem
*tx_bar
, void __iomem
*rx_bar
,
294 int stride
, struct nfp_net_fw_version
*fw_ver
,
295 struct nfp_eth_table_port
*eth_port
)
297 u32 n_tx_rings
, n_rx_rings
;
300 n_tx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_TXRINGS
);
301 n_rx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_RXRINGS
);
303 /* Allocate and initialise the netdev */
304 nn
= nfp_net_netdev_alloc(pf
->pdev
, n_tx_rings
, n_rx_rings
);
309 nn
->fw_ver
= *fw_ver
;
310 nn
->dp
.ctrl_bar
= ctrl_bar
;
314 nn
->stride_rx
= stride
;
315 nn
->stride_tx
= stride
;
316 nn
->eth_port
= eth_port
;
322 nfp_net_pf_init_port_netdev(struct nfp_pf
*pf
, struct nfp_net
*nn
,
327 /* Get MAC address */
328 nfp_net_get_mac_addr(nn
, pf
->cpp
, id
);
330 /* Get ME clock frequency from ctrl BAR
331 * XXX for now frequency is hardcoded until we figure out how
332 * to get the value from nfp-hwinfo into ctrl bar
334 nn
->me_freq_mhz
= 1200;
336 err
= nfp_net_netdev_init(nn
->dp
.netdev
);
340 nfp_net_debugfs_port_add(nn
, pf
->ddir
, id
);
348 nfp_net_pf_alloc_netdevs(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
349 void __iomem
*tx_bar
, void __iomem
*rx_bar
,
350 int stride
, struct nfp_net_fw_version
*fw_ver
)
352 u32 prev_tx_base
, prev_rx_base
, tgt_tx_base
, tgt_rx_base
;
353 struct nfp_eth_table_port
*eth_port
;
358 prev_tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
359 prev_rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
361 for (i
= 0; i
< pf
->num_ports
; i
++) {
362 tgt_tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
363 tgt_rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
364 tx_bar
+= (tgt_tx_base
- prev_tx_base
) * NFP_QCP_QUEUE_ADDR_SZ
;
365 rx_bar
+= (tgt_rx_base
- prev_rx_base
) * NFP_QCP_QUEUE_ADDR_SZ
;
366 prev_tx_base
= tgt_tx_base
;
367 prev_rx_base
= tgt_rx_base
;
369 eth_port
= nfp_net_find_port(pf
, i
);
370 if (eth_port
&& eth_port
->override_changed
) {
371 nfp_warn(pf
->cpp
, "Config changed for port #%d, reboot required before port will be operational\n", i
);
373 nn
= nfp_net_pf_alloc_port_netdev(pf
, ctrl_bar
, tx_bar
,
380 list_add_tail(&nn
->port_list
, &pf
->ports
);
384 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
387 if (list_empty(&pf
->ports
))
393 nfp_net_pf_free_netdevs(pf
);
398 nfp_net_pf_spawn_netdevs(struct nfp_pf
*pf
,
399 void __iomem
*ctrl_bar
, void __iomem
*tx_bar
,
400 void __iomem
*rx_bar
, int stride
,
401 struct nfp_net_fw_version
*fw_ver
)
403 unsigned int id
, wanted_irqs
, num_irqs
, ports_left
, irqs_left
;
407 /* Allocate the netdevs and do basic init */
408 err
= nfp_net_pf_alloc_netdevs(pf
, ctrl_bar
, tx_bar
, rx_bar
,
413 /* Get MSI-X vectors */
415 list_for_each_entry(nn
, &pf
->ports
, port_list
)
416 wanted_irqs
+= NFP_NET_NON_Q_VECTORS
+ nn
->dp
.num_r_vecs
;
417 pf
->irq_entries
= kcalloc(wanted_irqs
, sizeof(*pf
->irq_entries
),
419 if (!pf
->irq_entries
) {
424 num_irqs
= nfp_net_irqs_alloc(pf
->pdev
, pf
->irq_entries
,
425 NFP_NET_MIN_PORT_IRQS
* pf
->num_netdevs
,
428 nn_warn(nn
, "Unable to allocate MSI-X Vectors. Exiting\n");
433 /* Distribute IRQs to ports */
434 irqs_left
= num_irqs
;
435 ports_left
= pf
->num_netdevs
;
436 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
439 n
= DIV_ROUND_UP(irqs_left
, ports_left
);
440 nfp_net_irqs_assign(nn
, &pf
->irq_entries
[num_irqs
- irqs_left
],
446 /* Finish netdev init and register */
448 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
449 err
= nfp_net_pf_init_port_netdev(pf
, nn
, id
);
451 goto err_prev_deinit
;
459 list_for_each_entry_continue_reverse(nn
, &pf
->ports
, port_list
) {
460 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
461 nfp_net_netdev_clean(nn
->dp
.netdev
);
463 nfp_net_irqs_disable(pf
->pdev
);
465 kfree(pf
->irq_entries
);
467 nfp_net_pf_free_netdevs(pf
);
472 * PCI device functions
474 int nfp_net_pci_probe(struct nfp_pf
*pf
)
476 u8 __iomem
*ctrl_bar
, *tx_bar
, *rx_bar
;
477 u32 total_tx_qcs
, total_rx_qcs
;
478 struct nfp_net_fw_version fw_ver
;
479 u32 tx_area_sz
, rx_area_sz
;
484 mutex_init(&pf
->port_lock
);
486 /* Verify that the board has completed initialization */
487 if (!nfp_is_ready(pf
->cpp
)) {
488 nfp_err(pf
->cpp
, "NFP is not ready for NIC operation.\n");
492 mutex_lock(&pf
->port_lock
);
493 pf
->num_ports
= nfp_net_pf_get_num_ports(pf
);
495 ctrl_bar
= nfp_net_pf_map_ctrl_bar(pf
);
497 err
= pf
->fw_loaded
? -EINVAL
: -EPROBE_DEFER
;
501 nfp_net_get_fw_version(&fw_ver
, ctrl_bar
);
502 if (fw_ver
.resv
|| fw_ver
.class != NFP_NET_CFG_VERSION_CLASS_GENERIC
) {
503 nfp_err(pf
->cpp
, "Unknown Firmware ABI %d.%d.%d.%d\n",
504 fw_ver
.resv
, fw_ver
.class, fw_ver
.major
, fw_ver
.minor
);
509 /* Determine stride */
510 if (nfp_net_fw_ver_eq(&fw_ver
, 0, 0, 0, 1)) {
512 nfp_warn(pf
->cpp
, "OBSOLETE Firmware detected - VF isolation not available\n");
514 switch (fw_ver
.major
) {
519 nfp_err(pf
->cpp
, "Unsupported Firmware ABI %d.%d.%d.%d\n",
520 fw_ver
.resv
, fw_ver
.class,
521 fw_ver
.major
, fw_ver
.minor
);
527 /* Find how many QC structs need to be mapped */
528 total_tx_qcs
= nfp_net_pf_total_qcs(pf
, ctrl_bar
, stride
,
529 NFP_NET_CFG_START_TXQ
,
530 NFP_NET_CFG_MAX_TXRINGS
);
531 total_rx_qcs
= nfp_net_pf_total_qcs(pf
, ctrl_bar
, stride
,
532 NFP_NET_CFG_START_RXQ
,
533 NFP_NET_CFG_MAX_RXRINGS
);
534 if (!total_tx_qcs
|| !total_rx_qcs
) {
535 nfp_err(pf
->cpp
, "Invalid PF QC configuration [%d,%d]\n",
536 total_tx_qcs
, total_rx_qcs
);
541 tx_area_sz
= NFP_QCP_QUEUE_ADDR_SZ
* total_tx_qcs
;
542 rx_area_sz
= NFP_QCP_QUEUE_ADDR_SZ
* total_rx_qcs
;
545 start_q
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
546 tx_bar
= nfp_net_map_area(pf
->cpp
, "net.tx", 0, 0,
547 NFP_PCIE_QUEUE(start_q
),
548 tx_area_sz
, &pf
->tx_area
);
549 if (IS_ERR(tx_bar
)) {
550 nfp_err(pf
->cpp
, "Failed to map TX area.\n");
551 err
= PTR_ERR(tx_bar
);
556 start_q
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
557 rx_bar
= nfp_net_map_area(pf
->cpp
, "net.rx", 0, 0,
558 NFP_PCIE_QUEUE(start_q
),
559 rx_area_sz
, &pf
->rx_area
);
560 if (IS_ERR(rx_bar
)) {
561 nfp_err(pf
->cpp
, "Failed to map RX area.\n");
562 err
= PTR_ERR(rx_bar
);
566 pf
->ddir
= nfp_net_debugfs_device_add(pf
->pdev
);
568 err
= nfp_net_pf_spawn_netdevs(pf
, ctrl_bar
, tx_bar
, rx_bar
,
573 mutex_unlock(&pf
->port_lock
);
578 nfp_net_debugfs_dir_clean(&pf
->ddir
);
579 nfp_cpp_area_release_free(pf
->rx_area
);
581 nfp_cpp_area_release_free(pf
->tx_area
);
583 nfp_cpp_area_release_free(pf
->ctrl_area
);
585 mutex_unlock(&pf
->port_lock
);
589 void nfp_net_pci_remove(struct nfp_pf
*pf
)
593 mutex_lock(&pf
->port_lock
);
594 if (list_empty(&pf
->ports
))
597 list_for_each_entry(nn
, &pf
->ports
, port_list
) {
598 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
600 nfp_net_netdev_clean(nn
->dp
.netdev
);
603 nfp_net_pf_free_netdevs(pf
);
605 nfp_net_debugfs_dir_clean(&pf
->ddir
);
607 nfp_net_irqs_disable(pf
->pdev
);
608 kfree(pf
->irq_entries
);
610 nfp_cpp_area_release_free(pf
->rx_area
);
611 nfp_cpp_area_release_free(pf
->tx_area
);
612 nfp_cpp_area_release_free(pf
->ctrl_area
);
614 mutex_unlock(&pf
->port_lock
);