/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
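
/* For example, console_bitmask=0x3 redirects the debug output of consoles
 * 0 and 1 to syslog, since console n is checked against bit n of the mask.
 */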

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000
/* Update localtime to octeon firmware every 60 seconds.
 * Make the firmware use the same time reference, so that it is easy to
 * correlate firmware-logged events/errors with host events, for debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
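
/* The two field orderings above keep gso_size/gso_segs at the same byte
 * offsets within the u64 on big- and little-endian hosts, so the value can
 * be handed to the firmware as a single 64-bit word.
 */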

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

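	/* Each pass is bounded to MAX_PACKET_BUDGET packets per queue; if
	 * any queue still had work, run the tasklet again.
	 */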
	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
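	/* 0x100 is the start of PCIe extended config space; the AER
	 * capability is assumed to sit at this conventional offset rather
	 * than being looked up with pci_find_ext_capability().
	 */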
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled = liquidio_pcie_mmio_enabled,
	.slot_reset = liquidio_pcie_slot_reset,
	.resume = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{ /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{ /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{ /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name = "LiquidIO",
	.id_table = liquidio_pci_tbl,
	.probe = liquidio_probe,
	.remove = liquidio_remove,
	.err_handler = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend = liquidio_suspend,
	.resume = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns number of Tx sub-queues woken up (0 if none)
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

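/* Note: the double self-reference test in list_delete_head() above is an
 * open-coded list_empty(); returning NULL for an empty list keeps callers
 * from treating the list head itself as a node.
 */
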
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
			    list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

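	/* Each octeon_sg_entry holds four pointers, so a max-sized frag list
	 * needs ceil(OCTNIC_MAX_SG / 4) entries; the size is rounded up to
	 * an 8-byte multiple (assumed to be a hardware DMA requirement).
	 */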
	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * This API is invoked only when the new max-MTU of the interface is
	 * less than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
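		/* Reducing the MTU is deferred to a workqueue because
		 * dev_set_mtu() needs rtnl_lock, which (it is assumed)
		 * cannot safely be taken from this response context.
		 */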
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time_cb - callback that is invoked when soft command
 * sent by lio_sync_octeon_time() has completed successfully or failed
 *
 * @oct - octeon device structure
 * @status - indicates success or failure
 * @buf - pointer to the command that was sent to firmware
 **/
static void lio_sync_octeon_time_cb(struct octeon_device *oct,
				    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	if (status)
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon; error=%d\n", status);

	octeon_free_soft_command(oct, sc);
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	getnstimeofday64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	sc->callback = lio_sync_octeon_time_cb;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
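
/* Heuristic: the "other" Octeon is the next-enumerated device sharing this
 * device's PCI bus and slot, i.e. the second PF of the same two-PF adapter.
 */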

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

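		/* Drop the module references that loaded VF drivers hold on
		 * this PF module, so the PF driver can still be unloaded to
		 * recover from the crash.
		 */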
#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		/* other_oct may be NULL if no second PF was probed */
		vfs_mask2 = other_oct ?
			READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

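	/* Tear down in reverse order of initialization: each case below
	 * falls through to undo the stages completed before the current
	 * device state.
	 */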
	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	struct liquidio_rx_ctl_context *ctx;
	union octnet_cmd *ncmd;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		pci_sriov_set_totalvfs(oct->pci_dev,
				       oct->sriov_info.max_vfs);
#endif
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

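	/* Each octeon_sg_entry packs four pointers: fragment i lives in
	 * entry i >> 2, slot i & 3. Slot 0 of entry 0 held the linear part
	 * of the skb and was unmapped above.
	 */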
	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

1778/**
1779 * \brief Adjust ptp time
1780 * @param ptp PTP clock info
1781 * @param delta how much to adjust by, in nanosecs
1782 */
1783static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1784{
1785 unsigned long flags;
1786 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1787
1788 spin_lock_irqsave(&lio->ptp_lock, flags);
1789 lio->ptp_adjust += delta;
1790 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1791
1792 return 0;
1793}
1794
1795/**
1796 * \brief Get hardware clock time, including any adjustment
1797 * @param ptp PTP clock info
1798 * @param ts timespec
1799 */
1800static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1801 struct timespec64 *ts)
1802{
1803 u64 ns;
f21fb3ed
RV
1804 unsigned long flags;
1805 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1806 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1807
1808 spin_lock_irqsave(&lio->ptp_lock, flags);
1809 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1810 ns += lio->ptp_adjust;
1811 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1812
286af315 1813 *ts = ns_to_timespec64(ns);
f21fb3ed
RV
1814
1815 return 0;
1816}
1817
1818/**
1819 * \brief Set hardware clock time. Reset adjustment
1820 * @param ptp PTP clock info
1821 * @param ts timespec
1822 */
1823static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1824 const struct timespec64 *ts)
1825{
1826 u64 ns;
1827 unsigned long flags;
1828 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1829 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1830
e7ad9793 1831 ns = timespec64_to_ns(ts);
f21fb3ed
RV
1832
1833 spin_lock_irqsave(&lio->ptp_lock, flags);
1834 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1835 lio->ptp_adjust = 0;
1836 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1837
1838 return 0;
1839}
1840
1841/**
1842 * \brief Check if PTP is enabled
1843 * @param ptp PTP clock info
1844 * @param rq request
1845 * @param on enable/disable flag
1846 */
1847static int
1848liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1849 struct ptp_clock_request *rq __attribute__((unused)),
1850 int on __attribute__((unused)))
1851{
1852 return -EOPNOTSUPP;
1853}
1854
1855/**
1856 * \brief Open PTP clock source
1857 * @param netdev network device
1858 */
1859static void oct_ptp_open(struct net_device *netdev)
1860{
1861 struct lio *lio = GET_LIO(netdev);
1862 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1863
1864 spin_lock_init(&lio->ptp_lock);
1865
1866 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1867 lio->ptp_info.owner = THIS_MODULE;
1868 lio->ptp_info.max_adj = 250000000;
1869 lio->ptp_info.n_alarm = 0;
1870 lio->ptp_info.n_ext_ts = 0;
1871 lio->ptp_info.n_per_out = 0;
1872 lio->ptp_info.pps = 0;
1873 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1874 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1875 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1876 lio->ptp_info.settime64 = liquidio_ptp_settime;
1877 lio->ptp_info.enable = liquidio_ptp_enable;
1878
1879 lio->ptp_adjust = 0;
1880
1881 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1882 &oct->pci_dev->dev);
1883
1884 if (IS_ERR(lio->ptp_clock))
1885 lio->ptp_clock = NULL;
1886}
1887
1888/**
1889 * \brief Init PTP clock
1890 * @param oct octeon device
1891 */
1892static void liquidio_ptp_init(struct octeon_device *oct)
1893{
1894 u64 clock_comp, cfg;
1895
1896 clock_comp = (u64)NSEC_PER_SEC << 32;
1897 do_div(clock_comp, oct->coproc_clock_rate);
1898 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1899
1900 /* Enable */
1901 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1902 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1903}
1904
1905/**
1906 * \brief Load firmware to device
1907 * @param oct octeon device
1908 *
1909 * Maps device to firmware filename, requests firmware, and downloads it
1910 */
1911static int load_firmware(struct octeon_device *oct)
1912{
1913 int ret = 0;
1914 const struct firmware *fw;
1915 char fw_name[LIO_MAX_FW_FILENAME_LEN];
1916 char *tmp_fw_type;
1917
1918 if (fw_type_is_auto()) {
1919 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1920 strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1921 } else {
1922 tmp_fw_type = fw_type;
1923 }
1924
1925 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1926 octeon_get_conf(oct)->card_name, tmp_fw_type,
1927 LIO_FW_NAME_SUFFIX);
1928
1929 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1930 if (ret) {
1931 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1932 fw_name);
1933 release_firmware(fw);
1934 return ret;
1935 }
1936
1937 ret = octeon_download_firmware(oct, fw->data, fw->size);
1938
1939 release_firmware(fw);
1940
1941 return ret;
1942}
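/* Editor's note -- example of the assembled name, assuming the usual
 * macro values (LIO_FW_DIR "liquidio/", base "lio_", suffix ".bin" --
 * not shown in this file): a CN23XX card requesting the default "nic"
 * image resolves to "liquidio/lio_23xx_nic.bin", which
 * request_firmware() then looks up under /lib/firmware.
 */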
1943
1944/**
1945 * \brief Callback for getting interface configuration
1946 * @param status status of request
1947 * @param buf pointer to resp structure
1948 */
1949static void if_cfg_callback(struct octeon_device *oct,
1950 u32 status __attribute__((unused)),
1951 void *buf)
1952{
1953 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1954 struct liquidio_if_cfg_resp *resp;
1955 struct liquidio_if_cfg_context *ctx;
1956
1957 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1958 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1959
1960 oct = lio_get_device(ctx->octeon_id);
1961 if (resp->status)
1962 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
1963 CVM_CAST64(resp->status), status);
1964 WRITE_ONCE(ctx->cond, 1);
1965
1966 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1967 resp->cfg_info.liquidio_firmware_version);
1968
1969 /* This barrier is required to be sure that the response has been
1970 * written fully before waking up the handler
1971 */
1972 wmb();
1973
1974 wake_up_interruptible(&ctx->wc);
1975}
1976
1977/**
1978 * \brief Poll routine for checking transmit queue status
1979 * @param work work_struct data structure
1980 */
1981static void octnet_poll_check_txq_status(struct work_struct *work)
1982{
1983 struct cavium_wk *wk = (struct cavium_wk *)work;
1984 struct lio *lio = (struct lio *)wk->ctxptr;
1985
1986 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1987 return;
1988
1989 check_txq_status(lio);
1990 queue_delayed_work(lio->txq_status_wq.wq,
1991 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1992}
1993
1994/**
1995 * \brief Sets up the txq poll check
1996 * @param netdev network device
1997 */
1998 static inline int setup_tx_poll_fn(struct net_device *netdev)
1999{
2000 struct lio *lio = GET_LIO(netdev);
2001 struct octeon_device *oct = lio->oct_dev;
2002
2003 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2004 WQ_MEM_RECLAIM, 0);
2005 if (!lio->txq_status_wq.wq) {
2006 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2007 return -1;
2008 }
2009 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2010 octnet_poll_check_txq_status);
2011 lio->txq_status_wq.wk.ctxptr = lio;
2012 queue_delayed_work(lio->txq_status_wq.wq,
2013 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2014 return 0;
2015}
2016
2017static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2018{
2019 struct lio *lio = GET_LIO(netdev);
2020
2021 if (lio->txq_status_wq.wq) {
2022 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2023 destroy_workqueue(lio->txq_status_wq.wq);
2024 }
2025}
2026
2027/**
2028 * \brief Net device open for LiquidIO
2029 * @param netdev network device
2030 */
2031static int liquidio_open(struct net_device *netdev)
2032{
2033 struct lio *lio = GET_LIO(netdev);
2034 struct octeon_device *oct = lio->oct_dev;
2035 struct napi_struct *napi, *n;
2036
2037 if (oct->props[lio->ifidx].napi_enabled == 0) {
2038 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2039 napi_enable(napi);
2040
2041 oct->props[lio->ifidx].napi_enabled = 1;
2042
2043 if (OCTEON_CN23XX_PF(oct))
2044 oct->droq[0]->ops.poll_mode = 1;
2045 }
2046
2047 if (oct->ptp_enable)
2048 oct_ptp_open(netdev);
2049
2050 ifstate_set(lio, LIO_IFSTATE_RUNNING);
2051
2052 /* Ready for link status updates */
2053 lio->intf_open = 1;
2054
2055 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2056
2057 if (OCTEON_CN23XX_PF(oct)) {
2058 if (!oct->msix_on)
2059 if (setup_tx_poll_fn(netdev))
2060 return -1;
2061 } else {
2062 if (setup_tx_poll_fn(netdev))
2063 return -1;
2064 }
2065
2066 start_txqs(netdev);
2067
2068 /* tell Octeon to start forwarding packets to host */
2069 send_rx_ctrl_cmd(lio, 1);
2070
2071 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2072 netdev->name);
2073
2074 return 0;
2075}
2076
2077/**
2078 * \brief Net device stop for LiquidIO
2079 * @param netdev network device
2080 */
2081static int liquidio_stop(struct net_device *netdev)
2082{
2083 struct lio *lio = GET_LIO(netdev);
2084 struct octeon_device *oct = lio->oct_dev;
2085 struct napi_struct *napi, *n;
2086
2087 if (oct->props[lio->ifidx].napi_enabled) {
2088 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2089 napi_disable(napi);
2090
2091 oct->props[lio->ifidx].napi_enabled = 0;
2092
2093 if (OCTEON_CN23XX_PF(oct))
2094 oct->droq[0]->ops.poll_mode = 0;
2095 }
2096
2097 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2098
2099 netif_tx_disable(netdev);
2100
2101 /* Inform that netif carrier is down */
2102 netif_carrier_off(netdev);
2103 lio->intf_open = 0;
2104 lio->linfo.link.s.link_up = 0;
2105 lio->link_changes++;
2106
2107 /* Tell Octeon that nic interface is down. */
2108 send_rx_ctrl_cmd(lio, 0);
2109
2110 if (OCTEON_CN23XX_PF(oct)) {
2111 if (!oct->msix_on)
2112 cleanup_tx_poll_fn(netdev);
2113 } else {
2114 cleanup_tx_poll_fn(netdev);
2115 }
2116
2117 if (lio->ptp_clock) {
2118 ptp_clock_unregister(lio->ptp_clock);
2119 lio->ptp_clock = NULL;
2120 }
2121
2122 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2123
2124 return 0;
2125}
2126
2127/**
2128 * \brief Convert net device flags to an octnet_ifflags mask
2129 * @param netdev network device
2130 *
2131 * This routine generates an octnet_ifflags mask from the net device flags
2132 * received from the OS.
2133 */
2134static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2135{
2136 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2137
2138 if (netdev->flags & IFF_PROMISC)
2139 f |= OCTNET_IFFLAG_PROMISC;
2140
2141 if (netdev->flags & IFF_ALLMULTI)
2142 f |= OCTNET_IFFLAG_ALLMULTI;
2143
2144 if (netdev->flags & IFF_MULTICAST) {
2145 f |= OCTNET_IFFLAG_MULTICAST;
2146
2147 /* Accept all multicast addresses if there are more than we
2148 * can handle
2149 */
2150 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2151 f |= OCTNET_IFFLAG_ALLMULTI;
2152 }
2153
2154 if (netdev->flags & IFF_BROADCAST)
2155 f |= OCTNET_IFFLAG_BROADCAST;
2156
2157 return f;
2158}
2159
2160/**
2161 * \brief Net device set_multicast_list
2162 * @param netdev network device
2163 */
2164static void liquidio_set_mcast_list(struct net_device *netdev)
2165{
2166 struct lio *lio = GET_LIO(netdev);
2167 struct octeon_device *oct = lio->oct_dev;
2168 struct octnic_ctrl_pkt nctrl;
2169 struct netdev_hw_addr *ha;
2170 u64 *mc;
2171 int ret;
2172 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2173
2174 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2175
2176 /* Create a ctrl pkt command to be sent to core app. */
2177 nctrl.ncmd.u64 = 0;
2178 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2179 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2180 nctrl.ncmd.s.param2 = mc_count;
2181 nctrl.ncmd.s.more = mc_count;
2182 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2183 nctrl.netpndev = (u64)netdev;
2184 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2185
2186 /* copy all the addresses into the udd */
2187 mc = &nctrl.udd[0];
2188 netdev_for_each_mc_addr(ha, netdev) {
2189 *mc = 0;
2190 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2191 /* no need to swap bytes */
2192
2193 if (++mc > &nctrl.udd[mc_count])
2194 break;
2195 }
2196
2197 /* Apparently, any activity in this call from the kernel has to
2198 * be atomic. So we won't wait for response.
2199 */
2200 nctrl.wait_time = 0;
2201
2202 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2203 if (ret < 0) {
2204 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2205 ret);
2206 }
2207}
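/* Editor's note -- layout of the udd[] payload built above: bytes 0-1 of
 * each 64-bit slot stay zero and bytes 2-7 carry one MAC address in wire
 * order (hence the memcpy at offset 2 and the "no need to swap" remark),
 * so mc_count slots transport the whole filter list in a single command.
 */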
2208
2209/**
2210 * \brief Net device set_mac_address
2211 * @param netdev network device
2212 */
2213static int liquidio_set_mac(struct net_device *netdev, void *p)
2214{
2215 int ret = 0;
2216 struct lio *lio = GET_LIO(netdev);
2217 struct octeon_device *oct = lio->oct_dev;
2218 struct sockaddr *addr = (struct sockaddr *)p;
2219 struct octnic_ctrl_pkt nctrl;
2220
2221 if (!is_valid_ether_addr(addr->sa_data))
2222 return -EADDRNOTAVAIL;
2223
2224 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2225
2226 nctrl.ncmd.u64 = 0;
2227 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2228 nctrl.ncmd.s.param1 = 0;
2229 nctrl.ncmd.s.more = 1;
2230 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2231 nctrl.netpndev = (u64)netdev;
2232 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2233 nctrl.wait_time = 100;
2234
2235 nctrl.udd[0] = 0;
2236 /* The MAC Address is presented in network byte order. */
2237 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2238
2239 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2240 if (ret < 0) {
2241 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2242 return -ENOMEM;
2243 }
2244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2245 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2246
2247 return 0;
2248}
2249
2250/**
2251 * \brief Net device get_stats
2252 * @param netdev network device
2253 */
2254static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2255{
2256 struct lio *lio = GET_LIO(netdev);
2257 struct net_device_stats *stats = &netdev->stats;
2258 struct octeon_device *oct;
2259 u64 pkts = 0, drop = 0, bytes = 0;
2260 struct oct_droq_stats *oq_stats;
2261 struct oct_iq_stats *iq_stats;
2262 int i, iq_no, oq_no;
2263
2264 oct = lio->oct_dev;
2265
2266 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2267 return stats;
2268
2269 for (i = 0; i < oct->num_iqs; i++) {
2270 iq_no = lio->linfo.txpciq[i].s.q_no;
2271 iq_stats = &oct->instr_queue[iq_no]->stats;
2272 pkts += iq_stats->tx_done;
2273 drop += iq_stats->tx_dropped;
2274 bytes += iq_stats->tx_tot_bytes;
2275 }
2276
2277 stats->tx_packets = pkts;
2278 stats->tx_bytes = bytes;
2279 stats->tx_dropped = drop;
2280
2281 pkts = 0;
2282 drop = 0;
2283 bytes = 0;
2284
2285 for (i = 0; i < oct->num_oqs; i++) {
2286 oq_no = lio->linfo.rxpciq[i].s.q_no;
2287 oq_stats = &oct->droq[oq_no]->stats;
2288 pkts += oq_stats->rx_pkts_received;
2289 drop += (oq_stats->rx_dropped +
2290 oq_stats->dropped_nodispatch +
2291 oq_stats->dropped_toomany +
2292 oq_stats->dropped_nomem);
2293 bytes += oq_stats->rx_bytes_received;
2294 }
2295
2296 stats->rx_bytes = bytes;
2297 stats->rx_packets = pkts;
2298 stats->rx_dropped = drop;
2299
2300 return stats;
2301}
2302
2303/**
2304 * \brief Handler for SIOCSHWTSTAMP ioctl
2305 * @param netdev network device
2306 * @param ifr interface request
2308 */
2309 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2310{
2311 struct hwtstamp_config conf;
2312 struct lio *lio = GET_LIO(netdev);
2313
2314 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2315 return -EFAULT;
2316
2317 if (conf.flags)
2318 return -EINVAL;
2319
2320 switch (conf.tx_type) {
2321 case HWTSTAMP_TX_ON:
2322 case HWTSTAMP_TX_OFF:
2323 break;
2324 default:
2325 return -ERANGE;
2326 }
2327
2328 switch (conf.rx_filter) {
2329 case HWTSTAMP_FILTER_NONE:
2330 break;
2331 case HWTSTAMP_FILTER_ALL:
2332 case HWTSTAMP_FILTER_SOME:
2333 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2334 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2335 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2336 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2337 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2338 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2339 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2340 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2341 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2342 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2343 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2344 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2345 case HWTSTAMP_FILTER_NTP_ALL:
2346 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2347 break;
2348 default:
2349 return -ERANGE;
2350 }
2351
2352 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2353 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2354
2355 else
2356 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2357
2358 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2359}
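/* Editor's note -- a minimal user-space sketch of driving this handler
 * via SIOCSHWTSTAMP (interface name, fd, and error handling are
 * placeholders; needs linux/net_tstamp.h, linux/sockios.h, net/if.h):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *	// on success this driver writes HWTSTAMP_FILTER_ALL back into cfg
 */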
2360
2361/**
2362 * \brief ioctl handler
2363 * @param netdev network device
2364 * @param ifr interface request
2365 * @param cmd command
2366 */
2367static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2368{
2369 struct lio *lio = GET_LIO(netdev);
2370
2371 switch (cmd) {
2372 case SIOCSHWTSTAMP:
2373 if (lio->oct_dev->ptp_enable)
2374 return hwtstamp_ioctl(netdev, ifr);
2375 default:
2376 return -EOPNOTSUPP;
2377 }
2378}
2379
2380/**
2381 * \brief handle a Tx timestamp response
2382 * @param status response status
2383 * @param buf pointer to skb
2384 */
2385static void handle_timestamp(struct octeon_device *oct,
2386 u32 status,
2387 void *buf)
2388{
2389 struct octnet_buf_free_info *finfo;
2390 struct octeon_soft_command *sc;
2391 struct oct_timestamp_resp *resp;
2392 struct lio *lio;
2393 struct sk_buff *skb = (struct sk_buff *)buf;
2394
2395 finfo = (struct octnet_buf_free_info *)skb->cb;
2396 lio = finfo->lio;
2397 sc = finfo->sc;
2398 oct = lio->oct_dev;
2399 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2400
2401 if (status != OCTEON_REQUEST_DONE) {
2402 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2403 CVM_CAST64(status));
2404 resp->timestamp = 0;
2405 }
2406
2407 octeon_swap_8B_data(&resp->timestamp, 1);
2408
2409 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2410 struct skb_shared_hwtstamps ts;
2411 u64 ns = resp->timestamp;
2412
2413 netif_info(lio, tx_done, lio->netdev,
2414 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2415 skb, (unsigned long long)ns);
2416 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2417 skb_tstamp_tx(skb, &ts);
2418 }
2419
2420 octeon_free_soft_command(oct, sc);
2421 tx_buffer_free(skb);
2422}
2423
2424 /** \brief Send a data packet that will be timestamped
2425 * @param oct octeon device
2426 * @param ndata pointer to network data
2427 * @param finfo pointer to private network data
2428 */
2429static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2430 struct octnic_data_pkt *ndata,
2431 struct octnet_buf_free_info *finfo,
2432 int xmit_more)
2433{
2434 int retval;
2435 struct octeon_soft_command *sc;
2436 struct lio *lio;
2437 int ring_doorbell;
2438 u32 len;
2439
2440 lio = finfo->lio;
2441
2442 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2443 sizeof(struct oct_timestamp_resp));
2444 finfo->sc = sc;
2445
2446 if (!sc) {
2447 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2448 return IQ_SEND_FAILED;
2449 }
2450
2451 if (ndata->reqtype == REQTYPE_NORESP_NET)
2452 ndata->reqtype = REQTYPE_RESP_NET;
2453 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2454 ndata->reqtype = REQTYPE_RESP_NET_SG;
2455
2456 sc->callback = handle_timestamp;
2457 sc->callback_arg = finfo->skb;
2458 sc->iq_no = ndata->q_no;
2459
2460 if (OCTEON_CN23XX_PF(oct))
2461 len = (u32)((struct octeon_instr_ih3 *)
2462 (&sc->cmd.cmd3.ih3))->dlengsz;
2463 else
2464 len = (u32)((struct octeon_instr_ih2 *)
2465 (&sc->cmd.cmd2.ih2))->dlengsz;
2466
2467 ring_doorbell = !xmit_more;
2468
2469 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2470 sc, len, ndata->reqtype);
2471
2472 if (retval == IQ_SEND_FAILED) {
2473 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2474 retval);
2475 octeon_free_soft_command(oct, sc);
2476 } else {
2477 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2478 }
2479
2480 return retval;
2481}
2482
2483 /** \brief Transmit network packets to the Octeon interface
2484 * @param skb skbuff struct to be passed to network layer.
2485 * @param netdev pointer to network device
2486 * @returns whether the packet was transmitted to the device okay or not
2487 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2488 */
2489static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2490{
2491 struct lio *lio;
2492 struct octnet_buf_free_info *finfo;
2493 union octnic_cmd_setup cmdsetup;
2494 struct octnic_data_pkt ndata;
2495 struct octeon_device *oct;
2496 struct oct_iq_stats *stats;
2497 struct octeon_instr_irh *irh;
2498 union tx_info *tx_info;
2499 int status = 0;
f21fb3ed 2500 int q_idx = 0, iq_no = 0;
2501 int j, xmit_more = 0;
2502 u64 dptr = 0;
2503 u32 tag = 0;
2504
2505 lio = GET_LIO(netdev);
2506 oct = lio->oct_dev;
2507
2508 q_idx = skb_iq(lio, skb);
2509 tag = q_idx;
2510 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2511
2512 stats = &oct->instr_queue[iq_no]->stats;
2513
2514 /* Check for all conditions in which the current packet cannot be
2515 * transmitted.
2516 */
2517 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2518 (!lio->linfo.link.s.link_up) ||
2519 (skb->len <= 0)) {
2520 netif_info(lio, tx_err, lio->netdev,
2521 "Transmit failed link_status : %d\n",
2522 lio->linfo.link.s.link_up);
2523 goto lio_xmit_failed;
2524 }
2525
2526 /* Use space in skb->cb to store info used to unmap and
2527 * free the buffers.
2528 */
2529 finfo = (struct octnet_buf_free_info *)skb->cb;
2530 finfo->lio = lio;
2531 finfo->skb = skb;
2532 finfo->sc = NULL;
2533
2534 /* Prepare the attributes for the data to be passed to OSI. */
2535 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2536
2537 ndata.buf = (void *)finfo;
2538
2539 ndata.q_no = iq_no;
2540
2541 if (octnet_iq_is_full(oct, ndata.q_no)) {
2542 /* defer sending if queue is full */
2543 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2544 ndata.q_no);
2545 stats->tx_iq_busy++;
2546 return NETDEV_TX_BUSY;
2547 }
2548
2549 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2550 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2551 */
2552
2553 ndata.datasize = skb->len;
2554
2555 cmdsetup.u64 = 0;
2556 cmdsetup.s.iq_no = iq_no;
2557
2558 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2559 if (skb->encapsulation) {
2560 cmdsetup.s.tnl_csum = 1;
2561 stats->tx_vxlan++;
2562 } else {
2563 cmdsetup.s.transport_csum = 1;
2564 }
2565 }
2566 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2567 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2568 cmdsetup.s.timestamp = 1;
2569 }
2570
2571 if (skb_shinfo(skb)->nr_frags == 0) {
2572 cmdsetup.s.u.datasize = skb->len;
2573 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2574
2575 /* Offload checksum calculation for TCP/UDP packets */
2576 dptr = dma_map_single(&oct->pci_dev->dev,
2577 skb->data,
2578 skb->len,
2579 DMA_TO_DEVICE);
2580 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
f21fb3ed
RV
2581 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2582 __func__);
2583 return NETDEV_TX_BUSY;
2584 }
2585
2586 if (OCTEON_CN23XX_PF(oct))
2587 ndata.cmd.cmd3.dptr = dptr;
2588 else
2589 ndata.cmd.cmd2.dptr = dptr;
2590 finfo->dptr = dptr;
2591 ndata.reqtype = REQTYPE_NORESP_NET;
2592
2593 } else {
2594 int i, frags;
2595 struct skb_frag_struct *frag;
2596 struct octnic_gather *g;
2597
2598 spin_lock(&lio->glist_lock[q_idx]);
2599 g = (struct octnic_gather *)
2600 list_delete_head(&lio->glist[q_idx]);
2601 spin_unlock(&lio->glist_lock[q_idx]);
2602
2603 if (!g) {
2604 netif_info(lio, tx_err, lio->netdev,
2605 "Transmit scatter gather: glist null!\n");
2606 goto lio_xmit_failed;
2607 }
2608
2609 cmdsetup.s.gather = 1;
2610 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2611 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2612
2613 memset(g->sg, 0, g->sg_size);
2614
2615 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2616 skb->data,
2617 (skb->len - skb->data_len),
2618 DMA_TO_DEVICE);
2619 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2620 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2621 __func__);
2622 return NETDEV_TX_BUSY;
2623 }
2624 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2625
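/* Editor's note: the (i >> 2) / (i & 3) indexing in the loop below
 * reflects sg entries that each pack four pointers, so fragment i lands
 * in entry (i >> 2), slot (i & 3); slot 0 of entry 0 already holds the
 * linear part of the skb mapped just above.
 */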
2626 frags = skb_shinfo(skb)->nr_frags;
2627 i = 1;
2628 while (frags--) {
2629 frag = &skb_shinfo(skb)->frags[i - 1];
2630
2631 g->sg[(i >> 2)].ptr[(i & 3)] =
2632 dma_map_page(&oct->pci_dev->dev,
2633 frag->page.p,
2634 frag->page_offset,
2635 frag->size,
2636 DMA_TO_DEVICE);
2637
2638 if (dma_mapping_error(&oct->pci_dev->dev,
2639 g->sg[i >> 2].ptr[i & 3])) {
2640 dma_unmap_single(&oct->pci_dev->dev,
2641 g->sg[0].ptr[0],
2642 skb->len - skb->data_len,
2643 DMA_TO_DEVICE);
2644 for (j = 1; j < i; j++) {
2645 frag = &skb_shinfo(skb)->frags[j - 1];
2646 dma_unmap_page(&oct->pci_dev->dev,
2647 g->sg[j >> 2].ptr[j & 3],
2648 frag->size,
2649 DMA_TO_DEVICE);
2650 }
2651 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2652 __func__);
2653 return NETDEV_TX_BUSY;
2654 }
2655
2656 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2657 i++;
2658 }
2659
2660 dptr = g->sg_dma_ptr;
2661
2662 if (OCTEON_CN23XX_PF(oct))
2663 ndata.cmd.cmd3.dptr = dptr;
2664 else
2665 ndata.cmd.cmd2.dptr = dptr;
2666 finfo->dptr = dptr;
2667 finfo->g = g;
2668
2669 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2670 }
2671
2672 if (OCTEON_CN23XX_PF(oct)) {
2673 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2674 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2675 } else {
2676 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2677 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2678 }
2679
2680 if (skb_shinfo(skb)->gso_size) {
2681 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2682 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2683 stats->tx_gso++;
2684 }
2685
2686 /* HW insert VLAN tag */
2687 if (skb_vlan_tag_present(skb)) {
2688 irh->priority = skb_vlan_tag_get(skb) >> 13;
2689 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2690 }
2691
2692 xmit_more = skb->xmit_more;
2693
2694 if (unlikely(cmdsetup.s.timestamp))
2695 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2696 else
2697 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2698 if (status == IQ_SEND_FAILED)
2699 goto lio_xmit_failed;
2700
2701 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2702
2703 if (status == IQ_SEND_STOP)
2704 netif_stop_subqueue(netdev, q_idx);
2705
2706 netif_trans_update(netdev);
2707
2708 if (tx_info->s.gso_segs)
2709 stats->tx_done += tx_info->s.gso_segs;
2710 else
2711 stats->tx_done++;
2712 stats->tx_tot_bytes += ndata.datasize;
2713
2714 return NETDEV_TX_OK;
2715
2716lio_xmit_failed:
2717 stats->tx_dropped++;
2718 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2719 iq_no, stats->tx_dropped);
2720 if (dptr)
2721 dma_unmap_single(&oct->pci_dev->dev, dptr,
2722 ndata.datasize, DMA_TO_DEVICE);
2723
2724 octeon_ring_doorbell_locked(oct, iq_no);
2725
2726 tx_buffer_free(skb);
2727 return NETDEV_TX_OK;
2728}
2729
2730/** \brief Network device Tx timeout
2731 * @param netdev pointer to network device
2732 */
2733static void liquidio_tx_timeout(struct net_device *netdev)
2734{
2735 struct lio *lio;
2736
2737 lio = GET_LIO(netdev);
2738
2739 netif_info(lio, tx_err, lio->netdev,
2740 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2741 netdev->stats.tx_dropped);
2742 netif_trans_update(netdev);
2743 wake_txqs(netdev);
2744}
2745
2746static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2747 __be16 proto __attribute__((unused)),
2748 u16 vid)
2749{
2750 struct lio *lio = GET_LIO(netdev);
2751 struct octeon_device *oct = lio->oct_dev;
2752 struct octnic_ctrl_pkt nctrl;
2753 int ret = 0;
2754
2755 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2756
2757 nctrl.ncmd.u64 = 0;
2758 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2759 nctrl.ncmd.s.param1 = vid;
2760 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2761 nctrl.wait_time = 100;
2762 nctrl.netpndev = (u64)netdev;
2763 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2764
2765 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2766 if (ret < 0) {
2767 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2768 ret);
2769 }
2770
2771 return ret;
2772}
2773
2774static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2775 __be16 proto __attribute__((unused)),
2776 u16 vid)
2777{
2778 struct lio *lio = GET_LIO(netdev);
2779 struct octeon_device *oct = lio->oct_dev;
2780 struct octnic_ctrl_pkt nctrl;
2781 int ret = 0;
2782
2783 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2784
2785 nctrl.ncmd.u64 = 0;
2786 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2787 nctrl.ncmd.s.param1 = vid;
2788 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2789 nctrl.wait_time = 100;
2790 nctrl.netpndev = (u64)netdev;
2791 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2792
2793 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2794 if (ret < 0) {
2795 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2796 ret);
2797 }
2798 return ret;
2799}
2800
2801/** Sending command to enable/disable RX checksum offload
2802 * @param netdev pointer to network device
2803 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
2804 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
2805 * OCTNET_CMD_RXCSUM_DISABLE
2806 * @returns SUCCESS or FAILURE
2807 */
2808static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2809 u8 rx_cmd)
2810{
2811 struct lio *lio = GET_LIO(netdev);
2812 struct octeon_device *oct = lio->oct_dev;
2813 struct octnic_ctrl_pkt nctrl;
2814 int ret = 0;
2815
2816 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2817
2818 nctrl.ncmd.u64 = 0;
2819 nctrl.ncmd.s.cmd = command;
2820 nctrl.ncmd.s.param1 = rx_cmd;
2821 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2822 nctrl.wait_time = 100;
2823 nctrl.netpndev = (u64)netdev;
2824 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2825
2826 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2827 if (ret < 0) {
2828 dev_err(&oct->pci_dev->dev,
2829 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2830 ret);
2831 }
2832 return ret;
2833}
2834
2835/** Sending command to add/delete VxLAN UDP port to firmware
2836 * @param netdev pointer to network device
2837 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
2838 * @param vxlan_port VxLAN port to be added or deleted
2839 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
2840 * OCTNET_CMD_VXLAN_PORT_DEL
2841 * @returns SUCCESS or FAILURE
2842 */
2843static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2844 u16 vxlan_port, u8 vxlan_cmd_bit)
2845{
2846 struct lio *lio = GET_LIO(netdev);
2847 struct octeon_device *oct = lio->oct_dev;
2848 struct octnic_ctrl_pkt nctrl;
2849 int ret = 0;
2850
2851 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2852
2853 nctrl.ncmd.u64 = 0;
2854 nctrl.ncmd.s.cmd = command;
2855 nctrl.ncmd.s.more = vxlan_cmd_bit;
2856 nctrl.ncmd.s.param1 = vxlan_port;
2857 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2858 nctrl.wait_time = 100;
2859 nctrl.netpndev = (u64)netdev;
2860 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2861
2862 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2863 if (ret < 0) {
2864 dev_err(&oct->pci_dev->dev,
2865 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2866 ret);
2867 }
2868 return ret;
2869}
2870
2871/** \brief Net device fix features
2872 * @param netdev pointer to network device
2873 * @param request features requested
2874 * @returns updated features list
2875 */
2876static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2877 netdev_features_t request)
2878{
2879 struct lio *lio = netdev_priv(netdev);
2880
2881 if ((request & NETIF_F_RXCSUM) &&
2882 !(lio->dev_capability & NETIF_F_RXCSUM))
2883 request &= ~NETIF_F_RXCSUM;
2884
2885 if ((request & NETIF_F_HW_CSUM) &&
2886 !(lio->dev_capability & NETIF_F_HW_CSUM))
2887 request &= ~NETIF_F_HW_CSUM;
2888
2889 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2890 request &= ~NETIF_F_TSO;
2891
2892 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2893 request &= ~NETIF_F_TSO6;
2894
2895 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2896 request &= ~NETIF_F_LRO;
2897
2898 /* Disable LRO if RXCSUM is off */
2899 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2900 (lio->dev_capability & NETIF_F_LRO))
2901 request &= ~NETIF_F_LRO;
2902
2903 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2904 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2905 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2906
2907 return request;
2908}
2909
2910/** \brief Net device set features
2911 * @param netdev pointer to network device
2912 * @param features features to enable/disable
2913 */
2914static int liquidio_set_features(struct net_device *netdev,
2915 netdev_features_t features)
2916{
2917 struct lio *lio = netdev_priv(netdev);
2918
2919 if ((features & NETIF_F_LRO) &&
2920 (lio->dev_capability & NETIF_F_LRO) &&
2921 !(netdev->features & NETIF_F_LRO))
2922 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2923 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
f21fb3ed 2924 else if (!(features & NETIF_F_LRO) &&
2925 (lio->dev_capability & NETIF_F_LRO) &&
2926 (netdev->features & NETIF_F_LRO))
2927 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2928 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2929
2930 /* Sending command to firmware to enable/disable RX checksum
2931 * offload settings using ethtool
2932 */
2933 if (!(netdev->features & NETIF_F_RXCSUM) &&
2934 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2935 (features & NETIF_F_RXCSUM))
2936 liquidio_set_rxcsum_command(netdev,
2937 OCTNET_CMD_TNL_RX_CSUM_CTL,
2938 OCTNET_CMD_RXCSUM_ENABLE);
2939 else if ((netdev->features & NETIF_F_RXCSUM) &&
2940 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2941 !(features & NETIF_F_RXCSUM))
2942 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2943 OCTNET_CMD_RXCSUM_DISABLE);
2944
2945 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2946 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2947 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2948 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2949 OCTNET_CMD_VLAN_FILTER_ENABLE);
2950 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2951 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2952 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2953 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2954 OCTNET_CMD_VLAN_FILTER_DISABLE);
2955
2956 return 0;
2957}
2958
2959static void liquidio_add_vxlan_port(struct net_device *netdev,
2960 struct udp_tunnel_info *ti)
2961{
2962 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2963 return;
2964
2965 liquidio_vxlan_port_command(netdev,
2966 OCTNET_CMD_VXLAN_PORT_CONFIG,
2967 htons(ti->port),
2968 OCTNET_CMD_VXLAN_PORT_ADD);
2969}
2970
2971static void liquidio_del_vxlan_port(struct net_device *netdev,
2972 struct udp_tunnel_info *ti)
2973{
2974 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2975 return;
2976
2977 liquidio_vxlan_port_command(netdev,
2978 OCTNET_CMD_VXLAN_PORT_CONFIG,
2979 htons(ti->port),
2980 OCTNET_CMD_VXLAN_PORT_DEL);
2981}
2982
2983static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2984 u8 *mac, bool is_admin_assigned)
2985{
2986 struct lio *lio = GET_LIO(netdev);
2987 struct octeon_device *oct = lio->oct_dev;
2988 struct octnic_ctrl_pkt nctrl;
2989
2990 if (!is_valid_ether_addr(mac))
2991 return -EINVAL;
2992
2993 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2994 return -EINVAL;
2995
2996 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2997
2998 nctrl.ncmd.u64 = 0;
2999 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3000 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3001 nctrl.ncmd.s.param1 = vfidx + 1;
3002 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3003 nctrl.ncmd.s.more = 1;
3004 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3005 nctrl.netpndev = (u64)netdev;
3006 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3007 nctrl.wait_time = LIO_CMD_WAIT_TM;
3008
3009 nctrl.udd[0] = 0;
3010 /* The MAC Address is presented in network byte order. */
3011 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
3012
3013 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
3014
3015 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3016
3017 return 0;
3018}
3019
3020static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
3021{
3022 struct lio *lio = GET_LIO(netdev);
3023 struct octeon_device *oct = lio->oct_dev;
3024 int retval;
3025
3026 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3027 return -EINVAL;
3028
3029 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
3030 if (!retval)
3031 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
3032
3033 return retval;
3034}
3035
3036static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3037 u16 vlan, u8 qos, __be16 vlan_proto)
3038{
3039 struct lio *lio = GET_LIO(netdev);
3040 struct octeon_device *oct = lio->oct_dev;
3041 struct octnic_ctrl_pkt nctrl;
3042 u16 vlantci;
3043
3044 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3045 return -EINVAL;
3046
3047 if (vlan_proto != htons(ETH_P_8021Q))
3048 return -EPROTONOSUPPORT;
3049
3050 if (vlan >= VLAN_N_VID || qos > 7)
3051 return -EINVAL;
3052
3053 if (vlan)
3054 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
3055 else
3056 vlantci = 0;
3057
3058 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3059 return 0;
3060
3061 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3062
3063 if (vlan)
3064 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3065 else
3066 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3067
3068 nctrl.ncmd.s.param1 = vlantci;
3069 nctrl.ncmd.s.param2 =
3070 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
3071 nctrl.ncmd.s.more = 0;
3072 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3073 nctrl.cb_fn = 0;
3074 nctrl.wait_time = LIO_CMD_WAIT_TM;
3075
3076 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3077
3078 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3079
3080 return 0;
3081}
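/* Editor's note -- example invocation (device/VF numbers hypothetical):
 * "ip link set eth0 vf 0 vlan 100 qos 3" arrives here with vlan=100 and
 * qos=3, so vlantci = 100 | (3 << VLAN_PRIO_SHIFT) = 0x6064 is what the
 * firmware sees in param1.
 */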
3082
3083static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3084 struct ifla_vf_info *ivi)
3085{
3086 struct lio *lio = GET_LIO(netdev);
3087 struct octeon_device *oct = lio->oct_dev;
3088 u8 *macaddr;
3089
3090 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3091 return -EINVAL;
3092
3093 ivi->vf = vfidx;
3094 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3095 ether_addr_copy(&ivi->mac[0], macaddr);
3096 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3097 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3098 if (oct->sriov_info.trusted_vf.active &&
3099 oct->sriov_info.trusted_vf.id == vfidx)
3100 ivi->trusted = true;
3101 else
3102 ivi->trusted = false;
3103 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3104 return 0;
3105}
3106
3107static void trusted_vf_callback(struct octeon_device *oct_dev,
3108 u32 status, void *ptr)
3109{
3110 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
3111 struct lio_trusted_vf_ctx *ctx;
3112
3113 ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
3114 ctx->status = status;
3115
3116 complete(&ctx->complete);
3117}
3118
3119static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3120{
3121 struct octeon_device *oct = lio->oct_dev;
3122 struct lio_trusted_vf_ctx *ctx;
3123 struct octeon_soft_command *sc;
3124 int ctx_size, retval;
3125
3126 ctx_size = sizeof(struct lio_trusted_vf_ctx);
3127 sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
3128
3129 ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
3130 init_completion(&ctx->complete);
3131
3132 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3133
3134 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3135 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3136 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3137 trusted);
3138
3139 sc->callback = trusted_vf_callback;
3140 sc->callback_arg = sc;
3141 sc->wait_time = 1000;
3142
3143 retval = octeon_send_soft_command(oct, sc);
3144 if (retval == IQ_SEND_FAILED) {
3145 retval = -1;
3146 } else {
3147 /* Wait for response or timeout */
3148 if (wait_for_completion_timeout(&ctx->complete,
3149 msecs_to_jiffies(2000)))
3150 retval = ctx->status;
3151 else
3152 retval = -1;
3153 }
3154
3155 octeon_free_soft_command(oct, sc);
3156
3157 return retval;
3158}
3159
3160static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3161 bool setting)
3162{
3163 struct lio *lio = GET_LIO(netdev);
3164 struct octeon_device *oct = lio->oct_dev;
3165
3166 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3167 /* trusted vf is not supported by firmware older than 1.7.1 */
3168 return -EOPNOTSUPP;
3169 }
3170
3171 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3172 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3173 return -EINVAL;
3174 }
3175
3176 if (setting) {
3177 /* Set */
3178
3179 if (oct->sriov_info.trusted_vf.active &&
3180 oct->sriov_info.trusted_vf.id == vfidx)
3181 return 0;
3182
3183 if (oct->sriov_info.trusted_vf.active) {
3184 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3185 return -EPERM;
3186 }
3187 } else {
3188 /* Clear */
3189
3190 if (!oct->sriov_info.trusted_vf.active)
3191 return 0;
3192 }
3193
3194 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3195 if (setting) {
3196 oct->sriov_info.trusted_vf.id = vfidx;
3197 oct->sriov_info.trusted_vf.active = true;
3198 } else {
3199 oct->sriov_info.trusted_vf.active = false;
3200 }
3201
3202 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3203 setting ? "" : "not ");
3204 } else {
3205 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3206 return -1;
3207 }
3208
3209 return 0;
3210}
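/* Editor's note -- example invocation (names hypothetical): "ip link set
 * eth0 vf 1 trust on" reaches this handler with vfidx=1, setting=true.
 * Since only one trusted VF is allowed at a time, trusting a second VF
 * requires "trust off" on the first one beforehand.
 */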
3211
86dea55b
RV
3212static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3213 int linkstate)
3214{
3215 struct lio *lio = GET_LIO(netdev);
3216 struct octeon_device *oct = lio->oct_dev;
3217 struct octnic_ctrl_pkt nctrl;
3218
3219 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3220 return -EINVAL;
3221
3222 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3223 return 0;
3224
3225 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3226 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3227 nctrl.ncmd.s.param1 =
3228 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3229 nctrl.ncmd.s.param2 = linkstate;
3230 nctrl.ncmd.s.more = 0;
3231 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3232 nctrl.cb_fn = 0;
3233 nctrl.wait_time = LIO_CMD_WAIT_TM;
3234
3235 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3236
3237 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3238
3239 return 0;
3240}
3241
3242static int
3243liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3244{
3245 struct lio_devlink_priv *priv;
3246 struct octeon_device *oct;
3247
3248 priv = devlink_priv(devlink);
3249 oct = priv->oct;
3250
3251 *mode = oct->eswitch_mode;
3252
3253 return 0;
3254}
3255
3256static int
3257liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3258{
3259 struct lio_devlink_priv *priv;
3260 struct octeon_device *oct;
3261 int ret = 0;
3262
3263 priv = devlink_priv(devlink);
3264 oct = priv->oct;
3265
3266 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3267 return -EINVAL;
3268
3269 if (oct->eswitch_mode == mode)
3270 return 0;
3271
3272 switch (mode) {
3273 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3274 oct->eswitch_mode = mode;
3275 ret = lio_vf_rep_create(oct);
3276 break;
3277
3278 case DEVLINK_ESWITCH_MODE_LEGACY:
3279 lio_vf_rep_destroy(oct);
3280 oct->eswitch_mode = mode;
3281 break;
3282
3283 default:
3284 ret = -EINVAL;
3285 }
3286
3287 return ret;
3288}
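/* Editor's note -- example invocation (PCI address hypothetical):
 * "devlink dev eswitch set pci/0000:03:00.0 mode switchdev" lands here
 * and creates the VF representor netdevs via lio_vf_rep_create();
 * "... mode legacy" destroys them again.
 */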
3289
3290static const struct devlink_ops liquidio_devlink_ops = {
3291 .eswitch_mode_get = liquidio_eswitch_mode_get,
3292 .eswitch_mode_set = liquidio_eswitch_mode_set,
3293};
3294
3295static int
3296lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3297{
3298 struct lio *lio = GET_LIO(dev);
3299 struct octeon_device *oct = lio->oct_dev;
3300
3301 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3302 return -EOPNOTSUPP;
3303
3304 switch (attr->id) {
3305 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3306 attr->u.ppid.id_len = ETH_ALEN;
3307 ether_addr_copy(attr->u.ppid.id,
3308 (void *)&lio->linfo.hw_addr + 2);
3309 break;
3310
3311 default:
3312 return -EOPNOTSUPP;
3313 }
3314
3315 return 0;
3316}
3317
3318static const struct switchdev_ops lio_pf_switchdev_ops = {
3319 .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3320};
3321
3322 static const struct net_device_ops lionetdevops = {
3323 .ndo_open = liquidio_open,
3324 .ndo_stop = liquidio_stop,
3325 .ndo_start_xmit = liquidio_xmit,
3326 .ndo_get_stats = liquidio_get_stats,
3327 .ndo_set_mac_address = liquidio_set_mac,
3328 .ndo_set_rx_mode = liquidio_set_mcast_list,
3329 .ndo_tx_timeout = liquidio_tx_timeout,
3330
3331 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3332 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3333 .ndo_change_mtu = liquidio_change_mtu,
3334 .ndo_do_ioctl = liquidio_ioctl,
3335 .ndo_fix_features = liquidio_fix_features,
3336 .ndo_set_features = liquidio_set_features,
3337 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3338 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
3339 .ndo_set_vf_mac = liquidio_set_vf_mac,
3340 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3341 .ndo_get_vf_config = liquidio_get_vf_config,
3342 .ndo_set_vf_trust = liquidio_set_vf_trust,
3343 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3344};
3345
3346/** \brief Entry point for the liquidio module
3347 */
3348static int __init liquidio_init(void)
3349{
3350 int i;
3351 struct handshake *hs;
3352
3353 init_completion(&first_stage);
3354
3355 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3356
3357 if (liquidio_init_pci())
3358 return -EINVAL;
3359
3360 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3361
3362 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3363 hs = &handshake[i];
3364 if (hs->pci_dev) {
3365 wait_for_completion(&hs->init);
3366 if (!hs->init_ok) {
3367 /* init handshake failed */
3368 dev_err(&hs->pci_dev->dev,
3369 "Failed to init device\n");
3370 liquidio_deinit_pci();
3371 return -EIO;
3372 }
3373 }
3374 }
3375
3376 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3377 hs = &handshake[i];
3378 if (hs->pci_dev) {
3379 wait_for_completion_timeout(&hs->started,
3380 msecs_to_jiffies(30000));
3381 if (!hs->started_ok) {
3382 /* starter handshake failed */
3383 dev_err(&hs->pci_dev->dev,
3384 "Firmware failed to start\n");
3385 liquidio_deinit_pci();
3386 return -EIO;
3387 }
3388 }
3389 }
3390
3391 return 0;
3392}
3393
3394 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3395{
3396 struct octeon_device *oct = (struct octeon_device *)buf;
3397 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3398 int gmxport = 0;
3399 union oct_link_status *ls;
3400 int i;
3401
3402 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3403 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3404 recv_pkt->buffer_size[0],
3405 recv_pkt->rh.r_nic_info.gmxport);
3406 goto nic_info_err;
3407 }
3408
3409 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3410 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3411 OCT_DROQ_INFO_SIZE);
f21fb3ed
RV
3412
3413 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3414 for (i = 0; i < oct->ifcount; i++) {
3415 if (oct->props[i].gmxport == gmxport) {
3416 update_link_status(oct->props[i].netdev, ls);
3417 break;
3418 }
3419 }
3420
3421nic_info_err:
3422 for (i = 0; i < recv_pkt->buffer_count; i++)
3423 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3424 octeon_free_recv_info(recv_info);
3425 return 0;
3426}
3427
3428/**
3429 * \brief Setup network interfaces
3430 * @param octeon_dev octeon device
3431 *
3432 * Called during init time for each device. It assumes the NIC
3433 * is already up and running. The link information for each
3434 * interface is passed in link_info.
3435 */
3436static int setup_nic_devices(struct octeon_device *octeon_dev)
3437{
3438 struct lio *lio = NULL;
3439 struct net_device *netdev;
3440 u8 mac[6], i, j, *fw_ver;
3441 struct octeon_soft_command *sc;
3442 struct liquidio_if_cfg_context *ctx;
3443 struct liquidio_if_cfg_resp *resp;
3444 struct octdev_props *props;
3445 int retval, num_iqueues, num_oqueues;
3446 union oct_nic_if_cfg if_cfg;
3447 unsigned int base_queue;
3448 unsigned int gmx_port_id;
3449 u32 resp_size, ctx_size, data_size;
3450 u32 ifidx_or_pfnum;
3451 struct lio_version *vdata;
3452 struct devlink *devlink;
3453 struct lio_devlink_priv *lio_devlink;
3454
3455 /* This is to handle link status changes */
3456 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3457 OPCODE_NIC_INFO,
3458 lio_nic_info, octeon_dev);
3459
3460 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3461 * They are handled directly.
3462 */
3463 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3464 free_netbuf);
3465
3466 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3467 free_netsgbuf);
3468
3469 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3470 free_netsgbuf_with_resp);
3471
3472 for (i = 0; i < octeon_dev->ifcount; i++) {
3473 resp_size = sizeof(struct liquidio_if_cfg_resp);
3474 ctx_size = sizeof(struct liquidio_if_cfg_context);
3475 data_size = sizeof(struct lio_version);
3476 sc = (struct octeon_soft_command *)
3477 octeon_alloc_soft_command(octeon_dev, data_size,
3478 resp_size, ctx_size);
3479 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3480 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
3481 vdata = (struct lio_version *)sc->virtdptr;
3482
3483 *((u64 *)vdata) = 0;
3484 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3485 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3486 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3487
3488 if (OCTEON_CN23XX_PF(octeon_dev)) {
3489 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3490 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3491 base_queue = octeon_dev->sriov_info.pf_srn;
3492
3493 gmx_port_id = octeon_dev->pf_num;
3494 ifidx_or_pfnum = octeon_dev->pf_num;
3495 } else {
3496 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3497 octeon_get_conf(octeon_dev), i);
3498 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3499 octeon_get_conf(octeon_dev), i);
3500 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3501 octeon_get_conf(octeon_dev), i);
3502 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3503 octeon_get_conf(octeon_dev), i);
3504 ifidx_or_pfnum = i;
3505 }
3506
3507 dev_dbg(&octeon_dev->pci_dev->dev,
3508 "requesting config for interface %d, iqs %d, oqs %d\n",
3509 ifidx_or_pfnum, num_iqueues, num_oqueues);
3510 WRITE_ONCE(ctx->cond, 0);
3511 ctx->octeon_id = lio_get_device_id(octeon_dev);
3512 init_waitqueue_head(&ctx->wc);
3513
3514 if_cfg.u64 = 0;
3515 if_cfg.s.num_iqueues = num_iqueues;
3516 if_cfg.s.num_oqueues = num_oqueues;
3517 if_cfg.s.base_queue = base_queue;
3518 if_cfg.s.gmx_port_id = gmx_port_id;
3519
3520 sc->iq_no = 0;
3521
3522 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3523 OPCODE_NIC_IF_CFG, 0,
3524 if_cfg.u64, 0);
3525
3526 sc->callback = if_cfg_callback;
3527 sc->callback_arg = sc;
3528 sc->wait_time = 3000;
3529
3530 retval = octeon_send_soft_command(octeon_dev, sc);
3531 if (retval == IQ_SEND_FAILED) {
3532 dev_err(&octeon_dev->pci_dev->dev,
3533 "iq/oq config failed status: %x\n",
3534 retval);
3535 /* Soft instr is freed by driver in case of failure. */
3536 goto setup_nic_dev_fail;
3537 }
3538
3539 /* Sleep on a wait queue till the cond flag indicates that the
3540 * response arrived or timed-out.
3541 */
3542 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3543 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3544 goto setup_nic_wait_intr;
3545 }
3546
3547 retval = resp->status;
3548 if (retval) {
3549 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3550 goto setup_nic_dev_fail;
3551 }
3552
3553 /* Verify f/w version (in case of 'auto' loading from flash) */
3554 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3555 if (memcmp(LIQUIDIO_BASE_VERSION,
3556 fw_ver,
3557 strlen(LIQUIDIO_BASE_VERSION))) {
3558 dev_err(&octeon_dev->pci_dev->dev,
3559 "Unmatched firmware version. Expected %s.x, got %s.\n",
3560 LIQUIDIO_BASE_VERSION, fw_ver);
3561 goto setup_nic_dev_fail;
3562 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3563 FW_IS_PRELOADED) {
3564 dev_info(&octeon_dev->pci_dev->dev,
3565 "Using auto-loaded firmware version %s.\n",
3566 fw_ver);
3567 }
3568
3569 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3570 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3571
3572 num_iqueues = hweight64(resp->cfg_info.iqmask);
3573 num_oqueues = hweight64(resp->cfg_info.oqmask);
3574
3575 if (!(num_iqueues) || !(num_oqueues)) {
3576 dev_err(&octeon_dev->pci_dev->dev,
3577 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3578 resp->cfg_info.iqmask,
3579 resp->cfg_info.oqmask);
3580 goto setup_nic_dev_fail;
3581 }
3582 dev_dbg(&octeon_dev->pci_dev->dev,
3583 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3584 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3585 num_iqueues, num_oqueues);
3586 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3587
3588 if (!netdev) {
3589 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3590 goto setup_nic_dev_fail;
3591 }
3592
3593 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3594
3595 /* Associate the routines that will handle different
3596 * netdev tasks.
3597 */
3598 netdev->netdev_ops = &lionetdevops;
3599 SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3600
3601 lio = GET_LIO(netdev);
3602
3603 memset(lio, 0, sizeof(struct lio));
3604
3605 lio->ifidx = ifidx_or_pfnum;
3606
3607 props = &octeon_dev->props[i];
3608 props->gmxport = resp->cfg_info.linfo.gmxport;
3609 props->netdev = netdev;
3610
3611 lio->linfo.num_rxpciq = num_oqueues;
3612 lio->linfo.num_txpciq = num_iqueues;
3613 for (j = 0; j < num_oqueues; j++) {
3614 lio->linfo.rxpciq[j].u64 =
3615 resp->cfg_info.linfo.rxpciq[j].u64;
3616 }
3617 for (j = 0; j < num_iqueues; j++) {
3618 lio->linfo.txpciq[j].u64 =
3619 resp->cfg_info.linfo.txpciq[j].u64;
3620 }
3621 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3622 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3623 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3624
3625 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3626
3627 if (OCTEON_CN23XX_PF(octeon_dev) ||
3628 OCTEON_CN6XXX(octeon_dev)) {
3629 lio->dev_capability = NETIF_F_HIGHDMA
3630 | NETIF_F_IP_CSUM
3631 | NETIF_F_IPV6_CSUM
3632 | NETIF_F_SG | NETIF_F_RXCSUM
3633 | NETIF_F_GRO
3634 | NETIF_F_TSO | NETIF_F_TSO6
3635 | NETIF_F_LRO;
3636 }
3637 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3638
3639 /* Copy of transmit encapsulation capabilities:
3640 * TSO, TSO6, Checksums for this device
3641 */
3642 lio->enc_dev_capability = NETIF_F_IP_CSUM
3643 | NETIF_F_IPV6_CSUM
3644 | NETIF_F_GSO_UDP_TUNNEL
3645 | NETIF_F_HW_CSUM | NETIF_F_SG
3646 | NETIF_F_RXCSUM
3647 | NETIF_F_TSO | NETIF_F_TSO6
3648 | NETIF_F_LRO;
3649
3650 netdev->hw_enc_features = (lio->enc_dev_capability &
3651 ~NETIF_F_LRO);
3652
3653 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3654
f21fb3ed 3655 netdev->vlan_features = lio->dev_capability;
0da0b77c 3656 /* Add any unchangeable hw features */
63245f25
RV
3657 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3658 NETIF_F_HW_VLAN_CTAG_RX |
0da0b77c
RV
3659 NETIF_F_HW_VLAN_CTAG_TX;
3660
3661 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
f21fb3ed
RV
3662
3663 netdev->hw_features = lio->dev_capability;
0da0b77c
RV
3664 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3665 netdev->hw_features = netdev->hw_features &
3666 ~NETIF_F_HW_VLAN_CTAG_RX;
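/* netdev->hw_features is the set a user may toggle at runtime;
 * CTAG RX stays in netdev->features but is masked out here so the
 * always-on feature cannot be switched off.
 */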
f21fb3ed 3667
109cc165
JW
3668 /* MTU range: 68 - 16000 */
3669 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3670 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3671
f21fb3ed
RV
3672 /* Point to the properties for the octeon device to which this
3673 * interface belongs.
3674 */
3675 lio->oct_dev = octeon_dev;
3676 lio->octprops = props;
3677 lio->netdev = netdev;
f21fb3ed
RV
3678
3679 dev_dbg(&octeon_dev->pci_dev->dev,
3680 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3681 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3682
86dea55b
RV
3683 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3684 u8 vfmac[ETH_ALEN];
3685
3686 random_ether_addr(&vfmac[0]);
3687 if (__liquidio_set_vf_mac(netdev, j,
3688 &vfmac[0], false)) {
3689 dev_err(&octeon_dev->pci_dev->dev,
3690 "Error setting VF%d MAC address\n",
3691 j);
3692 goto setup_nic_dev_fail;
3693 }
3694 }
3695
f21fb3ed
RV
3696 /* 64-bit swap required on LE machines */
3697 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3698 	for (j = 0; j < ETH_ALEN; j++)
3699 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
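/* The six MAC octets occupy the low 48 bits of the 64-bit hw_addr
 * field; after the swap above, the "+ 2" skips the two unused
 * high-order bytes so mac[] receives the octets in wire order.
 */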
3700
3701 /* Copy MAC Address to OS network device structure */
3702
3703 ether_addr_copy(netdev->dev_addr, mac);
3704
26236fa9
RV
3705 /* By default all interfaces on a single Octeon use the same
3706 * tx and rx queues
3707 */
3708 lio->txq = lio->linfo.txpciq[0].s.q_no;
3709 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
a82457f1
IB
3710 if (liquidio_setup_io_queues(octeon_dev, i,
3711 lio->linfo.num_txpciq,
3712 lio->linfo.num_rxpciq)) {
f21fb3ed
RV
3713 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3714 goto setup_nic_dev_fail;
3715 }
3716
3717 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3718
f21fb3ed
RV
3719 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3720 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3721
fcd2b5e3 3722 if (setup_glists(octeon_dev, lio, num_iqueues)) {
f21fb3ed
RV
3723 dev_err(&octeon_dev->pci_dev->dev,
3724 "Gather list allocation failed\n");
3725 goto setup_nic_dev_fail;
3726 }
3727
3728 /* Register ethtool support */
3729 liquidio_set_ethtool_ops(netdev);
30136395
RV
3730 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3731 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3732 else
3733 octeon_dev->priv_flags = 0x0;
f21fb3ed 3734
0cece6c5 3735 if (netdev->features & NETIF_F_LRO)
a2c64b67
RV
3736 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3737 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
f21fb3ed 3738
836d57e5
PK
3739 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3740 OCTNET_CMD_VLAN_FILTER_ENABLE);
63245f25 3741
f21fb3ed 3742 if ((debug != -1) && (debug & NETIF_MSG_HW))
63245f25
RV
3743 liquidio_set_feature(netdev,
3744 OCTNET_CMD_VERBOSE_ENABLE, 0);
f21fb3ed 3745
7b6b6c95
RV
3746 if (setup_link_status_change_wq(netdev))
3747 goto setup_nic_dev_fail;
3748
907aaa6b
VB
3749 if ((octeon_dev->fw_info.app_cap_flags &
3750 LIQUIDIO_TIME_SYNC_CAP) &&
3751 setup_sync_octeon_time_wq(netdev))
3752 goto setup_nic_dev_fail;
3753
031d4f12
SB
3754 if (setup_rx_oom_poll_fn(netdev))
3755 goto setup_nic_dev_fail;
3756
f21fb3ed
RV
3757 /* Register the network device with the OS */
3758 if (register_netdev(netdev)) {
3759 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3760 goto setup_nic_dev_fail;
3761 }
3762
3763 dev_dbg(&octeon_dev->pci_dev->dev,
3764 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3765 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3766 netif_carrier_off(netdev);
0cece6c5 3767 lio->link_changes++;
f21fb3ed
RV
3768
3769 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3770
01fb237a
RV
3771 /* Send a command to the firmware to enable Rx checksum
3772 * offload by default when the LiquidIO driver sets up
3773 * this device.
3774 */
3775 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3776 OCTNET_CMD_RXCSUM_ENABLE);
3777 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3778 OCTNET_CMD_TXCSUM_ENABLE);
3779
f21fb3ed
RV
3780 dev_dbg(&octeon_dev->pci_dev->dev,
3781 "NIC ifidx:%d Setup successful\n", i);
3782
3783 octeon_free_soft_command(octeon_dev, sc);
3784 }
3785
d4be8ebe
VMG
3786 devlink = devlink_alloc(&liquidio_devlink_ops,
3787 sizeof(struct lio_devlink_priv));
3788 if (!devlink) {
3789 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3790 goto setup_nic_wait_intr;
3791 }
3792
3793 lio_devlink = devlink_priv(devlink);
3794 lio_devlink->oct = octeon_dev;
3795
3796 if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3797 devlink_free(devlink);
3798 dev_err(&octeon_dev->pci_dev->dev,
3799 "devlink registration failed\n");
3800 goto setup_nic_wait_intr;
3801 }
3802
3803 octeon_dev->devlink = devlink;
3804 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3805
f21fb3ed
RV
3806 return 0;
3807
3808setup_nic_dev_fail:
3809
3810 octeon_free_soft_command(octeon_dev, sc);
3811
afdf841f
RV
3812setup_nic_wait_intr:
3813
f21fb3ed
RV
3814 while (i--) {
3815 dev_err(&octeon_dev->pci_dev->dev,
3816 "NIC ifidx:%d Setup failed\n", i);
3817 liquidio_destroy_nic_device(octeon_dev, i);
3818 }
3819 return -ENODEV;
3820}
3821
ca6139ff
RV
3822#ifdef CONFIG_PCI_IOV
3823static int octeon_enable_sriov(struct octeon_device *oct)
3824{
3825 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3826 struct pci_dev *vfdev;
3827 int err;
3828 u32 u;
3829
3830 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3831 err = pci_enable_sriov(oct->pci_dev,
3832 oct->sriov_info.num_vfs_alloced);
3833 if (err) {
3834 dev_err(&oct->pci_dev->dev,
3835 "OCTEON: Failed to enable PCI sriov: %d\n",
3836 err);
3837 oct->sriov_info.num_vfs_alloced = 0;
3838 return err;
3839 }
3840 oct->sriov_info.sriov_enabled = 1;
3841
3842 /* init lookup table that maps DPI ring number to VF pci_dev
3843 * struct pointer
3844 */
3845 u = 0;
3846 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3847 OCTEON_CN23XX_VF_VID, NULL);
3848 while (vfdev) {
3849 if (vfdev->is_virtfn &&
3850 (vfdev->physfn == oct->pci_dev)) {
3851 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3852 vfdev;
3853 u += oct->sriov_info.rings_per_vf;
3854 }
3855 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3856 OCTEON_CN23XX_VF_VID, vfdev);
3857 }
3858 }
3859
3860 return num_vfs_alloced;
3861}
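/* Illustrative example (values hypothetical): with rings_per_vf == 2
 * and three VFs, and assuming pci_get_device() walks the VFs in
 * creation order, the loop above builds
 *
 *	dpiring_to_vfpcidev_lut[0] -> VF0's pci_dev
 *	dpiring_to_vfpcidev_lut[2] -> VF1's pci_dev
 *	dpiring_to_vfpcidev_lut[4] -> VF2's pci_dev
 *
 * i.e. each VF is indexed by the first DPI ring it owns.
 */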
3862
3863static int lio_pci_sriov_disable(struct octeon_device *oct)
3864{
3865 int u;
3866
3867 if (pci_vfs_assigned(oct->pci_dev)) {
3868 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3869 return -EPERM;
3870 }
3871
3872 pci_disable_sriov(oct->pci_dev);
3873
3874 u = 0;
3875 while (u < MAX_POSSIBLE_VFS) {
3876 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3877 u += oct->sriov_info.rings_per_vf;
3878 }
3879
3880 oct->sriov_info.num_vfs_alloced = 0;
3881 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3882 oct->pf_num);
3883
3884 return 0;
3885}
3886
3887static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3888{
3889 struct octeon_device *oct = pci_get_drvdata(dev);
3890 int ret = 0;
3891
3892 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3893 (oct->sriov_info.sriov_enabled)) {
3894 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3895 oct->pf_num, num_vfs);
3896 return 0;
3897 }
3898
3899 if (!num_vfs) {
d4be8ebe 3900 lio_vf_rep_destroy(oct);
ca6139ff
RV
3901 ret = lio_pci_sriov_disable(oct);
3902 } else if (num_vfs > oct->sriov_info.max_vfs) {
3903 dev_err(&oct->pci_dev->dev,
3904 "OCTEON: Max allowed VFs:%d user requested:%d",
3905 oct->sriov_info.max_vfs, num_vfs);
3906 ret = -EPERM;
3907 } else {
3908 oct->sriov_info.num_vfs_alloced = num_vfs;
3909 ret = octeon_enable_sriov(oct);
3910 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3911 oct->pf_num, num_vfs);
d4be8ebe
VMG
3912 ret = lio_vf_rep_create(oct);
3913 if (ret)
3914 dev_info(&oct->pci_dev->dev,
3915 "vf representor create failed");
ca6139ff
RV
3916 }
3917
3918 return ret;
3919}
3920#endif
3921
f21fb3ed
RV
3922/**
3923 * \brief initialize the NIC
3924 * @param oct octeon device
3925 *
3926 * This initialization routine is called once the Octeon device application is
3927 * up and running.
3928 */
3929static int liquidio_init_nic_module(struct octeon_device *oct)
3930{
0cece6c5 3931 int i, retval = 0;
f21fb3ed
RV
3932 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3933
3934 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3935
3936 /* Only the default iq and oq were initialized;
3937 * initialize the rest as well.
3938 */
3939 /* Run the port_config command for each port. */
3940 oct->ifcount = num_nic_ports;
3941
30136395 3942 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
f21fb3ed 3943
0cece6c5
RV
3944 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3945 oct->props[i].gmxport = -1;
3946
f21fb3ed
RV
3947 retval = setup_nic_devices(oct);
3948 if (retval) {
3949 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3950 goto octnet_init_failure;
3951 }
3952
e20f4696
VMG
3953 /* Call vf_rep_modinit if the firmware is switchdev capable
3954 * and do it from the first liquidio function probed.
3955 */
3956 if (!oct->octeon_id &&
3957 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
228aa012
DC
3958 retval = lio_vf_rep_modinit();
3959 if (retval) {
e20f4696
VMG
3960 liquidio_stop_nic_module(oct);
3961 goto octnet_init_failure;
3962 }
3963 }
3964
f21fb3ed
RV
3965 liquidio_ptp_init(oct);
3966
f21fb3ed
RV
3967 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3968
3969 return retval;
3970
3971octnet_init_failure:
3972
3973 oct->ifcount = 0;
3974
3975 return retval;
3976}
3977
3978/**
3979 * \brief starter callback that invokes the remaining initialization work after
3980 * the NIC is up and running.
3981 * @param work embedded work_struct (its cavium_wk wrapper carries the octeon device in ctxptr)
3982 */
3983static void nic_starter(struct work_struct *work)
3984{
3985 struct octeon_device *oct;
3986 struct cavium_wk *wk = (struct cavium_wk *)work;
3987
3988 oct = (struct octeon_device *)wk->ctxptr;
3989
3990 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3991 return;
3992
3993 /* If the status of the device is CORE_OK, the core
3994 * application has reported its application type. Call
3995 * any registered handlers now and move to the RUNNING
3996 * state.
3997 */
3998 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3999 schedule_delayed_work(&oct->nic_poll_work.work,
4000 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4001 return;
4002 }
4003
4004 atomic_set(&oct->status, OCT_DEV_RUNNING);
4005
4006 	if (oct->app_mode == CVM_DRV_NIC_APP) {
4007 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
4008
4009 if (liquidio_init_nic_module(oct))
4010 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
4011 else
4012 handshake[oct->octeon_id].started_ok = 1;
4013 } else {
4014 dev_err(&oct->pci_dev->dev,
4015 "Unexpected application running on NIC (%d). Check firmware.\n",
4016 oct->app_mode);
4017 }
4018
4019 complete(&handshake[oct->octeon_id].started);
4020}
4021
86dea55b
RV
4022static int
4023octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4024{
4025 struct octeon_device *oct = (struct octeon_device *)buf;
4026 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4027 int i, notice, vf_idx;
bb54be58 4028 bool cores_crashed;
86dea55b
RV
4029 u64 *data, vf_num;
4030
4031 notice = recv_pkt->rh.r.ossp;
c4ee5d81 4032 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
86dea55b
RV
4033
4034 /* the first 64-bit word of data is the vf_num */
4035 vf_num = data[0];
4036 octeon_swap_8B_data(&vf_num, 1);
4037 vf_idx = (int)vf_num - 1;
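/* The firmware numbers VFs from 1; the host index is 0-based,
 * hence the "- 1" above.
 */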
4038
bb54be58
FM
4039 cores_crashed = READ_ONCE(oct->cores_crashed);
4040
86dea55b
RV
4041 if (notice == VF_DRV_LOADED) {
4042 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4043 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4044 dev_info(&oct->pci_dev->dev,
4045 "driver for VF%d was loaded\n", vf_idx);
bb54be58
FM
4046 if (!cores_crashed)
4047 try_module_get(THIS_MODULE);
86dea55b
RV
4048 }
4049 } else if (notice == VF_DRV_REMOVED) {
4050 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4051 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4052 dev_info(&oct->pci_dev->dev,
4053 "driver for VF%d was removed\n", vf_idx);
bb54be58
FM
4054 if (!cores_crashed)
4055 module_put(THIS_MODULE);
86dea55b
RV
4056 }
4057 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4058 u8 *b = (u8 *)&data[1];
4059
4060 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4061 dev_info(&oct->pci_dev->dev,
4062 "VF driver changed VF%d's MAC address to %pM\n",
4063 vf_idx, b + 2);
4064 }
4065
4066 for (i = 0; i < recv_pkt->buffer_count; i++)
4067 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4068 octeon_free_recv_info(recv_info);
4069
4070 return 0;
4071}
4072
f21fb3ed
RV
4073/**
4074 * \brief Device initialization for each Octeon device that is probed
4075 * @param octeon_dev octeon device
4076 */
4077static int octeon_device_init(struct octeon_device *octeon_dev)
4078{
4079 int j, ret;
d3d7e6c6 4080 char bootcmd[] = "\n";
da1542b0 4081 char *dbg_enb = NULL;
088b8749 4082 enum lio_fw_state fw_state;
f21fb3ed
RV
4083 struct octeon_device_priv *oct_priv =
4084 (struct octeon_device_priv *)octeon_dev->priv;
4085 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4086
4087 /* Enable access to the octeon device and make its DMA capability
4088 * known to the OS.
4089 */
4090 if (octeon_pci_os_setup(octeon_dev))
4091 return 1;
4092
515e752d
RV
4093 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4094
f21fb3ed
RV
4095 /* Identify the Octeon type and map the BAR address space. */
4096 if (octeon_chip_specific_setup(octeon_dev)) {
4097 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4098 return 1;
4099 }
4100
4101 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4102
e1e3ce62
RF
4103 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4104 * since that is what is required for the reference to be removed
4105 * during de-initialization (see 'octeon_destroy_resources').
4106 */
4107 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4108 PCI_SLOT(octeon_dev->pci_dev->devfn),
4109 PCI_FUNC(octeon_dev->pci_dev->devfn),
4110 true);
4111
f21fb3ed
RV
4112 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4113
088b8749
RF
4114 /* CN23XX supports preloaded firmware if the following is true:
4115 *
4116 * The adapter indicates that firmware is currently running AND
4117 * 'fw_type' is 'auto'.
4118 *
4119 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4120 */
4121 if (OCTEON_CN23XX_PF(octeon_dev) &&
4122 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4123 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4124 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
c0eab5b3 4125 }
f21fb3ed 4126
088b8749
RF
4127 /* If loading firmware, only first device of adapter needs to do so. */
4128 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4129 FW_NEEDS_TO_BE_LOADED,
4130 FW_IS_BEING_LOADED);
4131
4132 /* Here, [local variable] 'fw_state' is set to one of:
4133 *
4134 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4135 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4136 * firmware to the adapter.
4137 * FW_IS_BEING_LOADED: The driver's second instance will not load
4138 * firmware to the adapter.
4139 */
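/* atomic_cmpxchg() returns the value it observed before any
 * exchange, so exactly one probing instance can read
 * FW_NEEDS_TO_BE_LOADED and thereby win the right to load the
 * firmware; every later instance reads back some other state and
 * skips the load.
 */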
4140
4141 /* Prior to f/w load, perform a soft reset of the Octeon device;
4142 * if the reset fails, return with an error.
4143 */
4144 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4145 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4146 return 1;
4147
f21fb3ed
RV
4148 /* Initialize the dispatch mechanism used to push packets arriving on
4149 * Octeon Output queues.
4150 */
4151 if (octeon_init_dispatch_list(octeon_dev))
4152 return 1;
4153
4154 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4155 OPCODE_NIC_CORE_DRV_ACTIVE,
4156 octeon_core_drv_init,
4157 octeon_dev);
4158
86dea55b
RV
4159 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4160 OPCODE_NIC_VF_DRV_NOTICE,
4161 octeon_recv_vf_drv_notice, octeon_dev);
f21fb3ed
RV
4162 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4163 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4164 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4165 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4166
4167 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4168
c865cdf1
RV
4169 if (octeon_set_io_queues_off(octeon_dev)) {
4170 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4171 return 1;
4172 }
f21fb3ed 4173
3451b97c
RV
4174 if (OCTEON_CN23XX_PF(octeon_dev)) {
4175 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4176 if (ret) {
4177 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4178 return ret;
4179 }
4180 }
4181
4182 /* Initialize soft command buffer pool
4183 */
4184 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4185 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4186 return 1;
4187 }
4188 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4189
f21fb3ed
RV
4190 /* Setup the data structures that manage this Octeon's Input queues. */
4191 if (octeon_setup_instr_queues(octeon_dev)) {
4192 dev_err(&octeon_dev->pci_dev->dev,
4193 "instruction queue initialization failed\n");
f21fb3ed
RV
4194 return 1;
4195 }
4196 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4197
f21fb3ed
RV
4198 /* Initialize lists to manage the requests of different types that
4199 * arrive from user & kernel applications for this octeon device.
4200 */
4201 if (octeon_setup_response_list(octeon_dev)) {
4202 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4203 return 1;
4204 }
4205 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4206
4207 if (octeon_setup_output_queues(octeon_dev)) {
4208 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
1e0d30fe 4209 return 1;
f21fb3ed
RV
4210 }
4211
4212 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4213
5b07aee1 4214 if (OCTEON_CN23XX_PF(octeon_dev)) {
5d65556b
RV
4215 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4216 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4217 return 1;
4218 }
4219 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4220
5b07aee1
RV
4221 if (octeon_allocate_ioq_vector(octeon_dev)) {
4222 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4223 return 1;
4224 }
515e752d 4225 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
5b07aee1
RV
4226
4227 } else {
4228 /* The input and output queue registers were set up earlier (the
4229 * queues were not enabled). Any additional registers
4230 * that need to be programmed should be done now.
4231 */
4232 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4233 if (ret) {
4234 dev_err(&octeon_dev->pci_dev->dev,
4235 "Failed to configure device registers\n");
4236 return ret;
4237 }
f21fb3ed
RV
4238 }
4239
4240 /* Initialize the tasklet that handles output queue packet processing. */
4241 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4242 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4243 (unsigned long)octeon_dev);
4244
4245 /* Setup the interrupt handler and record the INT SUM register address
4246 */
a82457f1
IB
4247 if (octeon_setup_interrupt(octeon_dev,
4248 octeon_dev->sriov_info.num_pf_rings))
1e0d30fe 4249 return 1;
f21fb3ed
RV
4250
4251 /* Enable Octeon device interrupts */
5b07aee1 4252 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
f21fb3ed 4253
515e752d
RV
4254 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4255
3c57f615
RF
4256 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4257 * the output queue is enabled.
4258 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4259 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4260 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4261 * before any credits have been issued, causing the ring to be reset
4262 * (and the f/w appear to never have started).
4263 */
4264 for (j = 0; j < octeon_dev->num_oqs; j++)
4265 writel(octeon_dev->droq[j]->max_count,
4266 octeon_dev->droq[j]->pkts_credit_reg);
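/* Writing max_count to the credit register hands every descriptor
 * in the ring to the hardware in one shot, so each DROQ can accept
 * packets the moment the queues are enabled below.
 */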
4267
f21fb3ed 4268 /* Enable the input and output queues for this Octeon device */
1b7c55c4
RV
4269 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4270 if (ret) {
4271 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4272 return ret;
4273 }
f21fb3ed
RV
4274
4275 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4276
088b8749 4277 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
c0eab5b3
RV
4278 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4279 if (!ddr_timeout) {
4280 dev_info(&octeon_dev->pci_dev->dev,
4281 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4282 }
f21fb3ed 4283
c0eab5b3 4284 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
f21fb3ed 4285
c0eab5b3
RV
4286 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4287 while (!ddr_timeout) {
4288 set_current_state(TASK_INTERRUPTIBLE);
4289 if (schedule_timeout(HZ / 10)) {
4290 /* user probably pressed Control-C */
4291 return 1;
4292 }
4293 }
4294 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4295 if (ret) {
4296 dev_err(&octeon_dev->pci_dev->dev,
4297 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4298 ret);
4b129ae3
RV
4299 return 1;
4300 }
f21fb3ed 4301
c0eab5b3
RV
4302 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4303 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4304 return 1;
4305 }
f21fb3ed 4306
c0eab5b3
RV
4307 /* Divert uboot to take commands from host instead. */
4308 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
d3d7e6c6 4309
c0eab5b3
RV
4310 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4311 ret = octeon_init_consoles(octeon_dev);
4312 if (ret) {
4313 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4314 return 1;
4315 }
da1542b0
RF
4316 /* If console debug is enabled, pass an empty string to use the
4317 * default enablement; otherwise pass NULL for 'disabled'.
4318 */
4319 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4320 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
c0eab5b3
RV
4321 if (ret) {
4322 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4323 return 1;
da1542b0
RF
4324 } else if (octeon_console_debug_enabled(0)) {
4325 /* If console was added AND we're logging console output
4326 * then set our console print function.
4327 */
4328 octeon_dev->console[0].print = octeon_dbg_console_print;
c0eab5b3 4329 }
f21fb3ed 4330
c0eab5b3 4331 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
f21fb3ed 4332
c0eab5b3
RV
4333 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4334 ret = load_firmware(octeon_dev);
4335 if (ret) {
4336 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4337 return 1;
4338 }
088b8749
RF
4339
4340 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
f21fb3ed
RV
4341 }
4342
4343 handshake[octeon_dev->octeon_id].init_ok = 1;
4344 complete(&handshake[octeon_dev->octeon_id].init);
4345
4346 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4347
f21fb3ed
RV
4348 return 0;
4349}
4350
da1542b0
RF
4351/**
4352 * \brief Debug console print function
4353 * @param octeon_dev octeon device
4354 * @param console_num console number
4355 * @param prefix first portion of line to display
4356 * @param suffix second portion of line to display
4357 *
4358 * The OCTEON debug console outputs entire lines (excluding '\n').
4359 * Normally, the line will be passed in the 'prefix' parameter.
4360 * However, due to buffering, it is possible for a line to be split into two
4361 * parts, in which case they will be passed as the 'prefix' parameter and
4362 * 'suffix' parameter.
4363 */
4364static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4365 char *prefix, char *suffix)
4366{
4367 if (prefix && suffix)
4368 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4369 suffix);
4370 else if (prefix)
4371 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4372 else if (suffix)
4373 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4374
4375 return 0;
4376}
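/* Illustrative example (strings hypothetical): a console line split
 * by buffering into prefix "Loading app " and suffix "image..." is
 * logged as the single line "0: Loading app image...".
 */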
4377
f21fb3ed
RV
4378/**
4379 * \brief Exits the module
4380 */
4381static void __exit liquidio_exit(void)
4382{
4383 liquidio_deinit_pci();
4384
4385 pr_info("LiquidIO network module is now unloaded\n");
4386}
4387
4388module_init(liquidio_init);
4389module_exit(liquidio_exit);