/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int ptp_enable = 1;
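
/* Added usage sketch (not in the original source): the parameters above are
 * ordinary module parameters, so a hypothetical load line could be
 *
 *	modprobe liquidio ddr_timeout=5000 debug=0x7 fw_type=nic
 *
 * fw_type uses permission 0000, so it is settable only at load time, while
 * ddr_timeout and debug (0644) can also be changed afterwards through
 * /sys/module/liquidio/parameters/.
 */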

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;

	unsigned long napi_mask;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount = atomic_read(
			     &oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
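
/* Added note (not in the original driver): pos = 0x100 above assumes the AER
 * extended capability is the first capability in extended config space. That
 * holds for these devices, but a more defensive sketch would look it up:
 *
 *	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *	if (!pos)
 *		return;
 *
 * pci_find_ext_capability() and PCI_EXT_CAP_ID_ERR are standard Linux PCI
 * APIs.
 */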

/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
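
/* Added note (not in the original driver): MODULE_DEVICE_TABLE exports the
 * IDs above as modalias strings, so udev can autoload this module when a
 * matching function appears. A hypothetical 23xx PF would match an alias of
 * the form pci:v0000177Dd00009702sv*sd*bc*sc*i* (0x177d being
 * PCI_VENDOR_ID_CAVIUM).
 */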

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
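
/* Added usage sketch (not in the original driver): these helpers manipulate
 * the LIO_IFSTATE_* bit mask defined near the top of this file, e.g.
 *
 *	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		...
 *
 * Note that set/reset are read-modify-write sequences over an atomic_t:
 * safe against torn reads, but not against two concurrent updaters, so
 * callers serialize these state transitions elsewhere.
 */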

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
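
	/* Added commentary (not in the original driver): one octeon_sg_entry
	 * describes up to four fragments, so ROUNDUP4(n) >> 2 is the entry
	 * count needed for n fragments. With the common MAX_SKB_FRAGS value
	 * of 17, that is ROUNDUP4(17) >> 2 = 5 entries per gather list; the
	 * outer ROUNDUP8 keeps each per-skb slice of the DMA region 8-byte
	 * aligned.
	 */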

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		}
	} else {
		if (!octnet_iq_is_full(oct, lio->txq)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
						  lio->txq, tx_restart, 1);
			wake_q(netdev, lio->txq);
		}
	}
}

int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	u64 ret;
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts  */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;
	struct msix_entry *msix_entries;
	int i;
	int num_ioq_vectors;
	int num_alloc_ioq_vectors;

	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
		oct->num_msix_irqs += 1;

		oct->msix_entries = kcalloc(
		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries)
			return 1;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		/*Assumption is that pf msix vectors start from pf srn to pf to
		 * trs and not from 0. if not change this code
		 */
		for (i = 0; i < oct->num_msix_irqs - 1; i++)
			msix_entries[i].entry = oct->sriov_info.pf_srn + i;
		msix_entries[oct->num_msix_irqs - 1].entry =
		    oct->sriov_info.trs;
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/** For PF, there is one non-ioq interrupt handler */
		num_ioq_vectors -= 1;
		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
				     liquidio_legacy_intr_handler, 0, "octeon",
				     oct);
		if (irqret) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
				irqret);
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     "octeon", &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/** Freeing the non-ioq irq vector here . */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/** clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				return 1;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
			    msix_entries[i].vector,
			    (&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler, IRQF_SHARED,
				     "octeon", oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			return 1;
		}
	}
	return 0;
}

static int liquidio_watchdog(void *param)
{
	u64 wdog;
	u16 mask_of_stuck_cores = 0;
	u16 mask_of_crashed_cores = 0;
	int core_num;
	u8 core_is_stuck[LIO_MAX_CORES];
	u8 core_crashed[LIO_MAX_CORES];
	struct octeon_device *oct = param;

	memset(core_is_stuck, 0, sizeof(core_is_stuck));
	memset(core_crashed, 0, sizeof(core_crashed));

	while (!kthread_should_stop()) {
		mask_of_crashed_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
			if (!core_is_stuck[core_num]) {
				wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));

				/* look at watchdog state field */
				wdog &= CIU3_WDOG_MASK;
				if (wdog) {
					/* this watchdog timer has expired */
					core_is_stuck[core_num] =
						LIO_MONITOR_WDOG_EXPIRE;
					mask_of_stuck_cores |= (1 << core_num);
				}
			}

			if (!core_crashed[core_num])
				core_crashed[core_num] =
				    (mask_of_crashed_cores >> core_num) & 1;
		}

		if (mask_of_stuck_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_is_stuck[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d is stuck!\n",
						core_num);
					/* 2 means we have printk'd an error
					 * so no need to repeat the same printk
					 */
					core_is_stuck[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}

		if (mask_of_crashed_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_crashed[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
						core_num);
					/* 2 means we have printk'd an error
					 * so no need to repeat the same printk
					 */
					core_crashed[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}
#ifdef CONFIG_MODULE_UNLOAD
		if (mask_of_stuck_cores || mask_of_crashed_cores) {
			/* make module refcount=0 so that rmmod will work */
			long refcount;

			refcount = module_refcount(THIS_MODULE);

			while (refcount > 0) {
				module_put(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}

			/* compensate for and withstand an unlikely (but still
			 * possible) race condition
			 */
			while (refcount < 0) {
				try_module_get(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}
		}
#endif
		/* sleep for two seconds */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2 * HZ);
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u64 scratch1;
		u8 bus, device, function;

		scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
		if (!(scratch1 & 4ULL)) {
			/* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
			 * the lio watchdog kernel thread is running for this
			 * NIC.  Each NIC gets one watchdog kernel thread.
			 */
			scratch1 |= 4ULL;
			octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
					   scratch1);

			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 *\brief Destroy resources associated with octeon device
 * @param pdev PCI device structure
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		/* Soft reset the octeon device before exiting */
		if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
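
/* Added note (not in the original driver): octeon_destroy_resources() is a
 * classic fall-through teardown switch. Each OCT_DEV_* case undoes exactly
 * one init stage, and because only the final case ends in break, entering at
 * the device's current state unwinds every stage below it in reverse init
 * order. A new init stage must therefore be added in mirrored positions in
 * both octeon_device_init() and this switch.
 */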

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	struct liquidio_rx_ctl_context *ctx;
	union octnet_cmd *ncmd;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_link_status_change_wq(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < lio->linfo.num_rxpciq; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and to map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}
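
/* Added example (not in the original driver): skb_iq() folds the stack's
 * chosen queue_mapping onto the interface's IQ count, so with, say,
 * num_txpciq == 4, skbs mapped to queues 0..7 land on IQ indexes
 * 0,1,2,3,0,1,2,3. The same modulo appears wherever txpciq[] is indexed.
 */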

/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
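
/* Added worked example (not in the original driver): the compensation
 * register is a 32.32 fixed-point "nanoseconds per coprocessor tick" value,
 * so a frequency trim of ppb parts-per-billion becomes
 *
 *	delta = (ppb << 32) / coproc_clock_rate
 *
 * For a hypothetical 1 GHz coprocessor clock and ppb = 1000, delta is
 * 1000 * 2^32 / 1e9 ~= 4295, i.e. about 1e-6 ns of extra accumulation per
 * 1 ns tick - exactly a 1 ppm speed-up.
 */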

/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
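
/* Added note (not in the original driver): clock_comp is the nominal 32.32
 * fixed-point step discussed above liquidio_ptp_adjfreq(); for a
 * hypothetical 500 MHz coprocessor clock, NSEC_PER_SEC * 2^32 / 5e8 gives a
 * step of 2.0 ns per tick, so the PTP counter advances in real nanoseconds
 * regardless of the coprocessor frequency.
 */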

/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
		return ret;
	}

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
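
/* Added example (not in the original driver): assuming the stock macro
 * values (LIO_FW_DIR "liquidio/", LIO_FW_BASE_NAME "lio_",
 * LIO_FW_NAME_SUFFIX ".bin"), a 23xx card with the default "nic" type
 * resolves to "liquidio/lio_23xx_nic.bin" under the system firmware path -
 * the same names declared by the MODULE_FIRMWARE() lines at the top of
 * this file.
 */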

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val = 0;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}
	/* tasklet creation for the droq */

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count,
	       oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)),
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/** Routine to push packets arriving on Octeon interface up to network layer.
 * @param octeon_id  - octeon device id.
 * @param skbuff     - skbuff struct to be passed to network layer.
 * @param len        - size of total data received.
 * @param rh         - Control header associated with the packet
 * @param param      - additional control data with the packet
 * @param arg        - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct napi_struct *napi = param;
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 ns;
	u16 vtag = 0;
	u32 r_dh_off;
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq = container_of(param, struct octeon_droq,
						napi);
	if (netdev) {
		int packet_was_received;
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
							MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (((oct->chip_id == OCTEON_CN66XX) ||
		     (oct->chip_id == OCTEON_CN68XX)) &&
		    ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
				    (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);

		skb->protocol = eth_type_trans(skb, skb->dev);
		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (rh->r_dh.vlan != 0)) {
			u16 vid = rh->r_dh.vlan;
			u16 priority = rh->r_dh.priority;

			vtag = priority << 13 | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;

		if (packet_was_received) {
			droq->stats.rx_bytes_received += len;
			droq->stats.rx_pkts_received++;
		} else {
			droq->stats.rx_dropped++;
			netif_info(lio, rx_err, lio->netdev,
				   "droq:%d error rx_dropped:%llu\n",
				   droq->q_no, droq->stats.rx_dropped);
		}

	} else {
		recv_buffer_free(skb);
	}
}
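/* Worked example for the r_dh_off arithmetic above: the firmware prepends
 * rh->r_dh.len units of BYTES_PER_DHLEN_UNIT bytes to the payload.  With
 * len == 3, r_dh_off starts at 2 * BYTES_PER_DHLEN_UNIT (the last unit);
 * the hardware timestamp, if present, is read there and the offset steps
 * back one unit, then the RSS hash is read the same way.  The final
 * skb_pull() strips all 3 units in one go.
 */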
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		struct call_single_data *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_droq *droq;
	int work_done;
	int tx_done = 0, iq_no;
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
						 POLL_EVENT_PROCESS_PKTS,
						 budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* Process iq buffers with in the budget limits */
		tx_done = octeon_flush_iq(oct, iq, budget);
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		tx_done = 1;
		napi_complete_done(napi, work_done);
		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
					     POLL_EVENT_ENABLE_INTR, 0);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}
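/* NAPI contract as implemented above: napi_complete_done() is called (and
 * interrupts re-enabled) only when the RX work fit in the budget and the
 * IQ flush finished; returning `budget` while tx_done is false keeps this
 * queue on the poll list even if RX is idle, so pending TX completions
 * are still reaped without waiting for another interrupt.
 */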
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static inline int setup_io_queues(struct octeon_device *octeon_dev,
				  int ifidx)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	static int cpu_id;
	static int cpu_id_modulus;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int q, q_no, retval = 0;
	struct lio *lio;
	int num_tx_descs;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = (void *)netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			q, q_no);
		retval = octeon_setup_droq(octeon_dev, q_no,
					   CFG_GET_NUM_RX_DESCS_NIC_IF
						(octeon_get_conf(octeon_dev),
						 lio->ifidx),
					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
						(octeon_get_conf(octeon_dev),
						 lio->ifidx), NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
			(u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		/* 23XX PF can receive control messages (via the first PF-owned
		 * droq) from the firmware even if the ethX interface is down,
		 * so that's why poll_mode must be off for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < lio->linfo.num_txpciq; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
							   (octeon_dev),
							   lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		if (octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
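/* The droq-to-CPU assignment above is a simple round robin: cpu_id is
 * incremented per DROQ modulo num_present_cpus().  E.g. with 4 present
 * CPUs, DROQs 0..5 land on CPUs 0, 1, 2, 3, 0, 1 respectively.
 */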
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
	    ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	start_txq(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates a octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
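/* Example: a netdev with IFF_PROMISC | IFF_BROADCAST set maps to
 * OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_PROMISC | OCTNET_IFFLAG_BROADCAST;
 * UNICAST is always included as the baseline.
 */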
/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
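/* Note: the hardware timestamps every packet or none, so all the PTP
 * rx_filter variants above are coarsened to HWTSTAMP_FILTER_ALL and the
 * adjusted config is copied back to userspace -- upgrading the requested
 * filter like this is the documented SIOCSHWTSTAMP convention.
 */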
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
		     lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/* \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = 1;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
/** \brief Transmit networks packets to the Octeon interface
 * @param skbuff skbuff struct to be passed to network layer.
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		q_idx = skb->queue_mapping;
		q_idx = (q_idx % (lio->linfo.num_txpciq));
		tag = q_idx;
		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (netif_is_multiqueue(netdev)) {
		if (octnet_iq_is_full(oct, ndata.q_no)) {
			/* defer sending if queue is full */
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			stats->tx_iq_busy++;
			return NETDEV_TX_BUSY;
		}
	} else {
		if (octnet_iq_is_full(oct, lio->txq)) {
			/* defer sending if queue is full */
			stats->tx_iq_busy++;
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   lio->txq);
			return NETDEV_TX_BUSY;
		}
	}
	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;

		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;

		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		stop_q(lio->netdev, q_idx);

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);
	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}
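/* Gather-list indexing used in liquidio_xmit(): each octeon sg entry
 * holds four pointers, so fragment i lands in g->sg[i >> 2].ptr[i & 3].
 * E.g. i == 5 maps to g->sg[1].ptr[1]; entry 0 slot 0 always carries the
 * linear part of the skb.
 */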
/** \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}

static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
/** Sending command to enable/disable RX checksum offload
 * @param netdev   pointer to network device
 * @param command  OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd   OCTNET_CMD_RXCSUM_ENABLE/
 *                 OCTNET_CMD_RXCSUM_DISABLE
 * @returns        SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}
	return ret;
}
/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev        pointer to network device
 * @param command       OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port    VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns             SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}
/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/** \brief Net device set features
 * @param netdev   pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}
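/* Userspace reaches liquidio_set_features() through ethtool, e.g.:
 *
 *	ethtool -K ethX lro on      # OCTNET_CMD_LRO_ENABLE to firmware
 *	ethtool -K ethX rx off      # OCTNET_CMD_RXCSUM_DISABLE, if the
 *	                            # encap capability advertises RXCSUM
 *
 * (ethX is a placeholder interface name.)
 */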
static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
				 u8 *mac, bool is_admin_assigned)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);

	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	return 0;
}

static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}

static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
				u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	u16 vlantci;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan >= VLAN_N_VID || qos > 7)
		return -EINVAL;

	if (vlan)
		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
	else
		vlantci = 0;

	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	if (vlan)
		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	else
		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;

	nctrl.ncmd.s.param1 = vlantci;
	nctrl.ncmd.s.param2 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
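
	return 0;
}

/* Worked example for the vlantci encoding above: vlan = 5, qos = 3 gives
 * vlantci = 5 | (3 << VLAN_PRIO_SHIFT) = 5 | 0x6000 = 0x6005, i.e. the
 * standard VLAN TCI layout with the PCP bits in [15:13].
 */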
static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
				  struct ifla_vf_info *ivi)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *macaddr;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	ivi->vf = vfidx;
	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
	ether_addr_copy(&ivi->mac[0], macaddr);
	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
	return 0;
}

static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
	nctrl.ncmd.s.param1 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param2 = linkstate;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_linkstate[vfidx] = linkstate;

	return 0;
}

static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
	.ndo_set_vf_mac		= liquidio_set_vf_mac,
	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
	.ndo_get_vf_config	= liquidio_get_vf_config,
	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
};
/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}

static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER is always on */
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			random_ether_addr(&vfmac[0]);
			if (__liquidio_set_vf_mac(netdev, j,
						  &vfmac[0], false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_fail;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}
#ifdef CONFIG_PCI_IOV
static int octeon_enable_sriov(struct octeon_device *oct)
{
	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
	struct pci_dev *vfdev;
	int err;
	u32 u;

	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
		err = pci_enable_sriov(oct->pci_dev,
				       oct->sriov_info.num_vfs_alloced);
		if (err) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Failed to enable PCI sriov: %d\n",
				err);
			oct->sriov_info.num_vfs_alloced = 0;
			return err;
		}
		oct->sriov_info.sriov_enabled = 1;

		/* init lookup table that maps DPI ring number to VF pci_dev
		 * struct pointer
		 */
		u = 0;
		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				       OCTEON_CN23XX_VF_VID, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn &&
			    (vfdev->physfn == oct->pci_dev)) {
				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
					vfdev;
				u += oct->sriov_info.rings_per_vf;
			}
			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					       OCTEON_CN23XX_VF_VID, vfdev);
		}
	}

	return num_vfs_alloced;
}

static int lio_pci_sriov_disable(struct octeon_device *oct)
{
	int u;

	if (pci_vfs_assigned(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
		return -EPERM;
	}

	pci_disable_sriov(oct->pci_dev);

	u = 0;
	while (u < MAX_POSSIBLE_VFS) {
		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
		u += oct->sriov_info.rings_per_vf;
	}

	oct->sriov_info.num_vfs_alloced = 0;
	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
		 oct->pf_num);

	return 0;
}

static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	struct octeon_device *oct = pci_get_drvdata(dev);
	int ret = 0;

	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
	    (oct->sriov_info.sriov_enabled)) {
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		return 0;
	}

	if (!num_vfs) {
		ret = lio_pci_sriov_disable(oct);
	} else if (num_vfs > oct->sriov_info.max_vfs) {
		dev_err(&oct->pci_dev->dev,
			"OCTEON: Max allowed VFs:%d user requested:%d",
			oct->sriov_info.max_vfs, num_vfs);
		ret = -EPERM;
	} else {
		oct->sriov_info.num_vfs_alloced = num_vfs;
		ret = octeon_enable_sriov(oct);
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
			 oct->pf_num, num_vfs);
	}

	return ret;
}
#endif
/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param octptr work struct work_struct
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}
static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}
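/* The try_module_get()/module_put() pair above pins this PF module for as
 * long as a VF driver is bound, preventing an rmmod of the PF from yanking
 * resources that an active VF still depends on; the vf_drv_loaded_mask
 * bookkeeping keeps the get/put calls balanced.
 */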
4426 * \brief Device initialization for each Octeon device that is probed
4427 * @param octeon_dev octeon device
4429 static int octeon_device_init(struct octeon_device
*octeon_dev
)
4433 char bootcmd
[] = "\n";
4434 struct octeon_device_priv
*oct_priv
=
4435 (struct octeon_device_priv
*)octeon_dev
->priv
;
4436 atomic_set(&octeon_dev
->status
, OCT_DEV_BEGIN_STATE
);
4438 /* Enable access to the octeon device and make its DMA capability
4441 if (octeon_pci_os_setup(octeon_dev
))
4444 atomic_set(&octeon_dev
->status
, OCT_DEV_PCI_ENABLE_DONE
);
4446 /* Identify the Octeon type and map the BAR address space. */
4447 if (octeon_chip_specific_setup(octeon_dev
)) {
4448 dev_err(&octeon_dev
->pci_dev
->dev
, "Chip specific setup failed\n");
4452 atomic_set(&octeon_dev
->status
, OCT_DEV_PCI_MAP_DONE
);
4454 octeon_dev
->app_mode
= CVM_DRV_INVALID_APP
;
4456 if (OCTEON_CN23XX_PF(octeon_dev
)) {
4457 if (!cn23xx_fw_loaded(octeon_dev
)) {
4459 /* Do a soft reset of the Octeon device. */
4460 if (octeon_dev
->fn_list
.soft_reset(octeon_dev
))
4462 /* things might have changed */
4463 if (!cn23xx_fw_loaded(octeon_dev
))
4470 } else if (octeon_dev
->fn_list
.soft_reset(octeon_dev
)) {
	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
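
	/* Each successful step in this function advances octeon_dev->status
	 * one notch (OCT_DEV_BEGIN_STATE toward OCT_DEV_HOST_OK). A sketch of
	 * the intended unwind, assuming a teardown helper that falls through
	 * a switch on the recorded state (the driver's octeon_destroy_resources
	 * follows this pattern):
	 *
	 *	switch (atomic_read(&oct->status)) {
	 *	case OCT_DEV_HOST_OK:
	 *		// fall through, undoing the most recent step first
	 *	case OCT_DEV_DISPATCH_INIT_DONE:
	 *		cancel_delayed_work_sync(&oct->nic_poll_work.work);
	 *		octeon_delete_dispatch_list(oct);
	 *	// ... earlier states undo earlier steps
	 *	}
	 */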
	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool
	 */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
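
	/* Both queue directions now exist but remain disabled: instruction
	 * (input) queues carry commands and packet data from the host to
	 * Octeon, while DROQs (output queues) carry packets from Octeon to
	 * the host. They are enabled further below, once device registers
	 * and interrupts have been set up.
	 */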
	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
	}

	if (octeon_allocate_ioq_vector(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
	/* The input and output queue registers were setup earlier (the
	 * queues were not enabled). Any additional registers
	 * that need to be programmed should be done now.
	 */
	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"Failed to configure device registers\n");
		return ret;
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
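
	/* On CN23XX, firmware may already be resident from a previous driver
	 * instance; cn23xx_fw_loaded() reports this via the SLI_SCRATCH_1
	 * flag that is set after a successful download (see below), in which
	 * case the DDR-wait/console/firmware sequence is skipped entirely.
	 */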
	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout)
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		ret = octeon_add_console(octeon_dev, 0);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
		 * loaded
		 */
		if (OCTEON_CN23XX_PF(octeon_dev))
			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
					   2ULL);
	}
	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */
	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);