1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2015 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/module.h>
24 #include <linux/crc32.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/pci.h>
27 #include <linux/pci_ids.h>
30 #include <linux/ipv6.h>
31 #include <linux/net_tstamp.h>
32 #include <linux/if_vlan.h>
33 #include <linux/firmware.h>
34 #include <linux/ethtool.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <linux/types.h>
37 #include <linux/list.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include "octeon_config.h"
41 #include "liquidio_common.h"
42 #include "octeon_droq.h"
43 #include "octeon_iq.h"
44 #include "response_manager.h"
45 #include "octeon_device.h"
46 #include "octeon_nic.h"
47 #include "octeon_main.h"
48 #include "octeon_network.h"
49 #include "cn66xx_regs.h"
50 #include "cn66xx_device.h"
51 #include "cn68xx_regs.h"
52 #include "cn68xx_device.h"
53 #include "liquidio_image.h"
55 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
56 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(LIQUIDIO_VERSION
);
59 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX
);
60 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX
);
61 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX
);
63 static int ddr_timeout
= 10000;
64 module_param(ddr_timeout
, int, 0644);
65 MODULE_PARM_DESC(ddr_timeout
,
66 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
68 static u32 console_bitmask
;
69 module_param(console_bitmask
, int, 0644);
70 MODULE_PARM_DESC(console_bitmask
,
71 "Bitmask indicating which consoles have debug output redirected to syslog.");
73 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
75 static int debug
= -1;
76 module_param(debug
, int, 0644);
77 MODULE_PARM_DESC(debug
, "NETIF_MSG debug bits");
79 static char fw_type
[LIO_MAX_FW_TYPE_LEN
];
80 module_param_string(fw_type
, fw_type
, sizeof(fw_type
), 0000);
81 MODULE_PARM_DESC(fw_type
, "Type of firmware to be loaded. Default \"nic\"");
84 module_param(conf_type
, int, 0);
85 MODULE_PARM_DESC(conf_type
, "select octeon configuration 0 default 1 ovs");
87 /* Bit mask values for lio->ifstate */
88 #define LIO_IFSTATE_DROQ_OPS 0x01
89 #define LIO_IFSTATE_REGISTERED 0x02
90 #define LIO_IFSTATE_RUNNING 0x04
91 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
93 /* Polling interval for determining when NIC application is alive */
94 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
96 /* runtime link query interval */
97 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
99 struct liquidio_if_cfg_context
{
102 wait_queue_head_t wc
;
107 struct liquidio_if_cfg_resp
{
109 struct liquidio_if_cfg_info cfg_info
;
113 struct oct_link_status_resp
{
115 struct oct_link_info link_info
;
119 struct oct_timestamp_resp
{
125 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
130 #ifdef __BIG_ENDIAN_BITFIELD
142 /** Octeon device properties to be used by the NIC module.
143 * Each octeon device in the system will be represented
144 * by this structure in the NIC module.
147 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
149 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
150 #define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
152 /** Structure of a node in list of gather components maintained by
153 * NIC driver for each network device.
155 struct octnic_gather
{
156 /** List manipulation. Next and prev pointers. */
157 struct list_head list
;
159 /** Size of the gather component at sg in bytes. */
162 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
165 /** Gather component that can accommodate max sized fragment list
166 * received from the IP layer.
168 struct octeon_sg_entry
*sg
;
171 /** This structure is used by NIC driver to store information required
172 * to free the sk_buff when the packet has been fetched by Octeon.
173 * Bytes offset below assume worst-case of a 64-bit system.
175 struct octnet_buf_free_info
{
176 /** Bytes 1-8. Pointer to network device private structure. */
179 /** Bytes 9-16. Pointer to sk_buff. */
182 /** Bytes 17-24. Pointer to gather list. */
183 struct octnic_gather
*g
;
185 /** Bytes 25-32. Physical address of skb->data or gather list. */
188 /** Bytes 33-47. Piggybacked soft command, if any */
189 struct octeon_soft_command
*sc
;
193 struct completion init
;
194 struct completion started
;
195 struct pci_dev
*pci_dev
;
200 struct octeon_device_priv
{
201 /** Tasklet structures for this device. */
202 struct tasklet_struct droq_tasklet
;
203 unsigned long napi_mask
;
206 static int octeon_device_init(struct octeon_device
*);
207 static void liquidio_remove(struct pci_dev
*pdev
);
208 static int liquidio_probe(struct pci_dev
*pdev
,
209 const struct pci_device_id
*ent
);
211 static struct handshake handshake
[MAX_OCTEON_DEVICES
];
212 static struct completion first_stage
;
214 static void octeon_droq_bh(unsigned long pdev
)
218 struct octeon_device
*oct
= (struct octeon_device
*)pdev
;
219 struct octeon_device_priv
*oct_priv
=
220 (struct octeon_device_priv
*)oct
->priv
;
222 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
223 for (q_no
= 0; q_no
< MAX_OCTEON_OUTPUT_QUEUES
; q_no
++) {
224 if (!(oct
->io_qmask
.oq
& (1UL << q_no
)))
226 reschedule
|= octeon_droq_process_packets(oct
, oct
->droq
[q_no
],
231 tasklet_schedule(&oct_priv
->droq_tasklet
);
234 static int lio_wait_for_oq_pkts(struct octeon_device
*oct
)
236 struct octeon_device_priv
*oct_priv
=
237 (struct octeon_device_priv
*)oct
->priv
;
238 int retry
= 100, pkt_cnt
= 0, pending_pkts
= 0;
244 for (i
= 0; i
< MAX_OCTEON_OUTPUT_QUEUES
; i
++) {
245 if (!(oct
->io_qmask
.oq
& (1UL << i
)))
247 pkt_cnt
+= octeon_droq_check_hw_for_pkts(oct
,
251 pending_pkts
+= pkt_cnt
;
252 tasklet_schedule(&oct_priv
->droq_tasklet
);
255 schedule_timeout_uninterruptible(1);
257 } while (retry
-- && pending_pkts
);
/**
 * \brief Report completed transmit work to the byte queue limits layer
 * @param txq pointer to the struct netdev_queue (passed as void *)
 * @param pkts_compl number of packets completed
 * @param bytes_compl number of bytes completed
 */
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *nq = txq;

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
270 void octeon_update_tx_completion_counters(void *buf
, int reqtype
,
271 unsigned int *pkts_compl
,
272 unsigned int *bytes_compl
)
274 struct octnet_buf_free_info
*finfo
;
275 struct sk_buff
*skb
= NULL
;
276 struct octeon_soft_command
*sc
;
279 case REQTYPE_NORESP_NET
:
280 case REQTYPE_NORESP_NET_SG
:
285 case REQTYPE_RESP_NET_SG
:
286 case REQTYPE_RESP_NET
:
288 skb
= sc
->callback_arg
;
296 *bytes_compl
+= skb
->len
;
299 void octeon_report_sent_bytes_to_bql(void *buf
, int reqtype
)
301 struct octnet_buf_free_info
*finfo
;
303 struct octeon_soft_command
*sc
;
304 struct netdev_queue
*txq
;
307 case REQTYPE_NORESP_NET
:
308 case REQTYPE_NORESP_NET_SG
:
313 case REQTYPE_RESP_NET_SG
:
314 case REQTYPE_RESP_NET
:
316 skb
= sc
->callback_arg
;
323 txq
= netdev_get_tx_queue(skb
->dev
, skb_get_queue_mapping(skb
));
324 netdev_tx_sent_queue(txq
, skb
->len
);
327 int octeon_console_debug_enabled(u32 console
)
329 return (console_bitmask
>> (console
)) & 0x1;
333 * \brief Forces all IO queues off on a given device
334 * @param oct Pointer to Octeon device
336 static void force_io_queues_off(struct octeon_device
*oct
)
338 if ((oct
->chip_id
== OCTEON_CN66XX
) ||
339 (oct
->chip_id
== OCTEON_CN68XX
)) {
340 /* Reset the Enable bits for Input Queues. */
341 octeon_write_csr(oct
, CN6XXX_SLI_PKT_INSTR_ENB
, 0);
343 /* Reset the Enable bits for Output Queues. */
344 octeon_write_csr(oct
, CN6XXX_SLI_PKT_OUT_ENB
, 0);
349 * \brief wait for all pending requests to complete
350 * @param oct Pointer to Octeon device
352 * Called during shutdown sequence
354 static int wait_for_pending_requests(struct octeon_device
*oct
)
358 for (i
= 0; i
< 100; i
++) {
360 atomic_read(&oct
->response_list
361 [OCTEON_ORDERED_SC_LIST
].pending_req_count
);
363 schedule_timeout_uninterruptible(HZ
/ 10);
375 * \brief Cause device to go quiet so it can be safely removed/reset/etc
376 * @param oct Pointer to Octeon device
378 static inline void pcierror_quiesce_device(struct octeon_device
*oct
)
382 /* Disable the input and output queues now. No more packets will
383 * arrive from Octeon, but we should wait for all packet processing
386 force_io_queues_off(oct
);
388 /* To allow for in-flight requests */
389 schedule_timeout_uninterruptible(100);
391 if (wait_for_pending_requests(oct
))
392 dev_err(&oct
->pci_dev
->dev
, "There were pending requests\n");
394 /* Force all requests waiting to be fetched by OCTEON to complete. */
395 for (i
= 0; i
< MAX_OCTEON_INSTR_QUEUES
; i
++) {
396 struct octeon_instr_queue
*iq
;
398 if (!(oct
->io_qmask
.iq
& (1UL << i
)))
400 iq
= oct
->instr_queue
[i
];
402 if (atomic_read(&iq
->instr_pending
)) {
403 spin_lock_bh(&iq
->lock
);
405 iq
->octeon_read_index
= iq
->host_write_index
;
406 iq
->stats
.instr_processed
+=
407 atomic_read(&iq
->instr_pending
);
408 lio_process_iq_request_list(oct
, iq
);
409 spin_unlock_bh(&iq
->lock
);
413 /* Force all pending ordered list requests to time out. */
414 lio_process_ordered_list(oct
, 1);
416 /* We do not need to wait for output queue packets to be processed. */
420 * \brief Cleanup PCI AER uncorrectable error status
421 * @param dev Pointer to PCI device
423 static void cleanup_aer_uncorrect_error_status(struct pci_dev
*dev
)
428 pr_info("%s :\n", __func__
);
430 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, &status
);
431 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_SEVER
, &mask
);
432 if (dev
->error_state
== pci_channel_io_normal
)
433 status
&= ~mask
; /* Clear corresponding nonfatal bits */
435 status
&= mask
; /* Clear corresponding fatal bits */
436 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, status
);
440 * \brief Stop all PCI IO to a given device
441 * @param dev Pointer to Octeon device
443 static void stop_pci_io(struct octeon_device
*oct
)
445 /* No more instructions will be forwarded. */
446 atomic_set(&oct
->status
, OCT_DEV_IN_RESET
);
448 pci_disable_device(oct
->pci_dev
);
450 /* Disable interrupts */
451 oct
->fn_list
.disable_interrupt(oct
->chip
);
453 pcierror_quiesce_device(oct
);
455 /* Release the interrupt line */
456 free_irq(oct
->pci_dev
->irq
, oct
);
458 if (oct
->flags
& LIO_FLAG_MSI_ENABLED
)
459 pci_disable_msi(oct
->pci_dev
);
461 dev_dbg(&oct
->pci_dev
->dev
, "Device state is now %s\n",
462 lio_get_state_string(&oct
->status
));
464 /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
465 /* making it a common function for all OCTEON models */
466 cleanup_aer_uncorrect_error_status(oct
->pci_dev
);
470 * \brief called when PCI error is detected
471 * @param pdev Pointer to PCI device
472 * @param state The current pci connection state
474 * This function is called after a PCI bus error affecting
475 * this device has been detected.
477 static pci_ers_result_t
liquidio_pcie_error_detected(struct pci_dev
*pdev
,
478 pci_channel_state_t state
)
480 struct octeon_device
*oct
= pci_get_drvdata(pdev
);
482 /* Non-correctable Non-fatal errors */
483 if (state
== pci_channel_io_normal
) {
484 dev_err(&oct
->pci_dev
->dev
, "Non-correctable non-fatal error reported:\n");
485 cleanup_aer_uncorrect_error_status(oct
->pci_dev
);
486 return PCI_ERS_RESULT_CAN_RECOVER
;
489 /* Non-correctable Fatal errors */
490 dev_err(&oct
->pci_dev
->dev
, "Non-correctable FATAL reported by PCI AER driver\n");
493 /* Always return a DISCONNECT. There is no support for recovery but only
494 * for a clean shutdown.
496 return PCI_ERS_RESULT_DISCONNECT
;
500 * \brief mmio handler
501 * @param pdev Pointer to PCI device
503 static pci_ers_result_t
liquidio_pcie_mmio_enabled(struct pci_dev
*pdev
)
505 /* We should never hit this since we never ask for a reset for a Fatal
506 * Error. We always return DISCONNECT in io_error above.
507 * But play safe and return RECOVERED for now.
509 return PCI_ERS_RESULT_RECOVERED
;
513 * \brief called after the pci bus has been reset.
514 * @param pdev Pointer to PCI device
516 * Restart the card from scratch, as if from a cold-boot. Implementation
517 * resembles the first-half of the octeon_resume routine.
519 static pci_ers_result_t
liquidio_pcie_slot_reset(struct pci_dev
*pdev
)
521 /* We should never hit this since we never ask for a reset for a Fatal
522 * Error. We always return DISCONNECT in io_error above.
523 * But play safe and return RECOVERED for now.
525 return PCI_ERS_RESULT_RECOVERED
;
529 * \brief called when traffic can start flowing again.
530 * @param pdev Pointer to PCI device
532 * This callback is called when the error recovery driver tells us that
533 * its OK to resume normal operation. Implementation resembles the
534 * second-half of the octeon_resume routine.
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * Invoked by the AER core once error recovery completes; this driver
 * has no per-device work to do at that point.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev)
{
	/* Intentionally empty: nothing to restart here. */
}
543 * \brief called when suspending
544 * @param pdev Pointer to PCI device
545 * @param state state to suspend to
547 static int liquidio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
553 * \brief called when resuming
554 * @param pdev Pointer to PCI device
556 static int liquidio_resume(struct pci_dev
*pdev
)
562 /* For PCI-E Advanced Error Recovery (AER) Interface */
563 static const struct pci_error_handlers liquidio_err_handler
= {
564 .error_detected
= liquidio_pcie_error_detected
,
565 .mmio_enabled
= liquidio_pcie_mmio_enabled
,
566 .slot_reset
= liquidio_pcie_slot_reset
,
567 .resume
= liquidio_pcie_resume
,
570 static const struct pci_device_id liquidio_pci_tbl
[] = {
572 PCI_VENDOR_ID_CAVIUM
, 0x91, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0
575 PCI_VENDOR_ID_CAVIUM
, 0x92, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0
581 MODULE_DEVICE_TABLE(pci
, liquidio_pci_tbl
);
583 static struct pci_driver liquidio_pci_driver
= {
585 .id_table
= liquidio_pci_tbl
,
586 .probe
= liquidio_probe
,
587 .remove
= liquidio_remove
,
588 .err_handler
= &liquidio_err_handler
, /* For AER */
591 .suspend
= liquidio_suspend
,
592 .resume
= liquidio_resume
,
598 * \brief register PCI driver
600 static int liquidio_init_pci(void)
602 return pci_register_driver(&liquidio_pci_driver
);
606 * \brief unregister PCI driver
608 static void liquidio_deinit_pci(void)
610 pci_unregister_driver(&liquidio_pci_driver
);
614 * \brief check interface state
615 * @param lio per-network private data
616 * @param state_flag flag state to check
618 static inline int ifstate_check(struct lio
*lio
, int state_flag
)
620 return atomic_read(&lio
->ifstate
) & state_flag
;
624 * \brief set interface state
625 * @param lio per-network private data
626 * @param state_flag flag state to set
628 static inline void ifstate_set(struct lio
*lio
, int state_flag
)
630 atomic_set(&lio
->ifstate
, (atomic_read(&lio
->ifstate
) | state_flag
));
634 * \brief clear interface state
635 * @param lio per-network private data
636 * @param state_flag flag state to clear
638 static inline void ifstate_reset(struct lio
*lio
, int state_flag
)
640 atomic_set(&lio
->ifstate
, (atomic_read(&lio
->ifstate
) & ~(state_flag
)));
644 * \brief Stop Tx queues
645 * @param netdev network device
647 static inline void txqs_stop(struct net_device
*netdev
)
649 if (netif_is_multiqueue(netdev
)) {
652 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
653 netif_stop_subqueue(netdev
, i
);
655 netif_stop_queue(netdev
);
660 * \brief Start Tx queues
661 * @param netdev network device
663 static inline void txqs_start(struct net_device
*netdev
)
665 if (netif_is_multiqueue(netdev
)) {
668 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
669 netif_start_subqueue(netdev
, i
);
671 netif_start_queue(netdev
);
676 * \brief Wake Tx queues
677 * @param netdev network device
679 static inline void txqs_wake(struct net_device
*netdev
)
681 if (netif_is_multiqueue(netdev
)) {
684 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
685 netif_wake_subqueue(netdev
, i
);
687 netif_wake_queue(netdev
);
692 * \brief Stop Tx queue
693 * @param netdev network device
695 static void stop_txq(struct net_device
*netdev
)
701 * \brief Start Tx queue
702 * @param netdev network device
704 static void start_txq(struct net_device
*netdev
)
706 struct lio
*lio
= GET_LIO(netdev
);
708 if (lio
->linfo
.link
.s
.status
) {
715 * \brief Wake a queue
716 * @param netdev network device
717 * @param q which queue to wake
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which sub-queue to wake (ignored for non-multiqueue devices)
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (!netif_is_multiqueue(netdev)) {
		netif_wake_queue(netdev);
		return;
	}

	netif_wake_subqueue(netdev, q);
}
728 * \brief Stop a queue
729 * @param netdev network device
730 * @param q which queue to stop
/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which sub-queue to stop (ignored for non-multiqueue devices)
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (!netif_is_multiqueue(netdev)) {
		netif_stop_queue(netdev);
		return;
	}

	netif_stop_subqueue(netdev, q);
}
741 * \brief Check Tx queue status, and take appropriate action
742 * @param lio per-network private data
743 * @returns 0 if full, number of queues woken up otherwise
745 static inline int check_txq_status(struct lio
*lio
)
749 if (netif_is_multiqueue(lio
->netdev
)) {
750 int numqs
= lio
->netdev
->num_tx_queues
;
753 /* check each sub-queue state */
754 for (q
= 0; q
< numqs
; q
++) {
755 iq
= lio
->linfo
.txpciq
[q
& (lio
->linfo
.num_txpciq
- 1)];
756 if (octnet_iq_is_full(lio
->oct_dev
, iq
))
758 wake_q(lio
->netdev
, q
);
762 if (octnet_iq_is_full(lio
->oct_dev
, lio
->txq
))
764 wake_q(lio
->netdev
, lio
->txq
);
771 * Remove the node at the head of the list. The list would be empty at
772 * the end of this call if there are no more nodes in the list.
774 static inline struct list_head
*list_delete_head(struct list_head
*root
)
776 struct list_head
*node
;
778 if ((root
->prev
== root
) && (root
->next
== root
))
790 * \brief Delete gather list
791 * @param lio per-network private data
793 static void delete_glist(struct lio
*lio
)
795 struct octnic_gather
*g
;
798 g
= (struct octnic_gather
*)
799 list_delete_head(&lio
->glist
);
802 kfree((void *)((unsigned long)g
->sg
-
810 * \brief Setup gather list
811 * @param lio per-network private data
813 static int setup_glist(struct lio
*lio
)
816 struct octnic_gather
*g
;
818 INIT_LIST_HEAD(&lio
->glist
);
820 for (i
= 0; i
< lio
->tx_qsize
; i
++) {
821 g
= kzalloc(sizeof(*g
), GFP_KERNEL
);
826 ((ROUNDUP4(OCTNIC_MAX_SG
) >> 2) * OCT_SG_ENTRY_SIZE
);
828 g
->sg
= kmalloc(g
->sg_size
+ 8, GFP_KERNEL
);
834 /* The gather component should be aligned on 64-bit boundary */
835 if (((unsigned long)g
->sg
) & 7) {
836 g
->adjust
= 8 - (((unsigned long)g
->sg
) & 7);
837 g
->sg
= (struct octeon_sg_entry
*)
838 ((unsigned long)g
->sg
+ g
->adjust
);
840 list_add_tail(&g
->list
, &lio
->glist
);
843 if (i
== lio
->tx_qsize
)
851 * \brief Print link information
852 * @param netdev network device
854 static void print_link_info(struct net_device
*netdev
)
856 struct lio
*lio
= GET_LIO(netdev
);
858 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_REGISTERED
) {
859 struct oct_link_info
*linfo
= &lio
->linfo
;
861 if (linfo
->link
.s
.status
) {
862 netif_info(lio
, link
, lio
->netdev
, "%d Mbps %s Duplex UP\n",
864 (linfo
->link
.s
.duplex
) ? "Full" : "Half");
866 netif_info(lio
, link
, lio
->netdev
, "Link Down\n");
872 * \brief Update link status
873 * @param netdev network device
874 * @param ls link status structure
876 * Called on receipt of a link status response from the core application to
877 * update each interface's link status.
879 static inline void update_link_status(struct net_device
*netdev
,
880 union oct_link_status
*ls
)
882 struct lio
*lio
= GET_LIO(netdev
);
884 if ((lio
->intf_open
) && (lio
->linfo
.link
.u64
!= ls
->u64
)) {
885 lio
->linfo
.link
.u64
= ls
->u64
;
887 print_link_info(netdev
);
889 if (lio
->linfo
.link
.s
.status
) {
890 netif_carrier_on(netdev
);
891 /* start_txq(netdev); */
894 netif_carrier_off(netdev
);
901 * \brief Droq packet processor sceduler
902 * @param oct octeon device
905 void liquidio_schedule_droq_pkt_handlers(struct octeon_device
*oct
)
907 struct octeon_device_priv
*oct_priv
=
908 (struct octeon_device_priv
*)oct
->priv
;
910 struct octeon_droq
*droq
;
912 if (oct
->int_status
& OCT_DEV_INTR_PKT_DATA
) {
913 for (oq_no
= 0; oq_no
< MAX_OCTEON_OUTPUT_QUEUES
; oq_no
++) {
914 if (!(oct
->droq_intr
& (1 << oq_no
)))
917 droq
= oct
->droq
[oq_no
];
919 if (droq
->ops
.poll_mode
) {
920 droq
->ops
.napi_fn(droq
);
921 oct_priv
->napi_mask
|= (1 << oq_no
);
923 tasklet_schedule(&oct_priv
->droq_tasklet
);
930 * \brief Interrupt handler for octeon
932 * @param dev octeon device
935 irqreturn_t
liquidio_intr_handler(int irq
__attribute__((unused
)), void *dev
)
937 struct octeon_device
*oct
= (struct octeon_device
*)dev
;
940 /* Disable our interrupts for the duration of ISR */
941 oct
->fn_list
.disable_interrupt(oct
->chip
);
943 ret
= oct
->fn_list
.process_interrupt_regs(oct
);
945 if (ret
== IRQ_HANDLED
)
946 liquidio_schedule_droq_pkt_handlers(oct
);
948 /* Re-enable our interrupts */
949 if (!(atomic_read(&oct
->status
) == OCT_DEV_IN_RESET
))
950 oct
->fn_list
.enable_interrupt(oct
->chip
);
956 * \brief Setup interrupt for octeon device
957 * @param oct octeon device
959 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
961 static int octeon_setup_interrupt(struct octeon_device
*oct
)
965 err
= pci_enable_msi(oct
->pci_dev
);
967 dev_warn(&oct
->pci_dev
->dev
, "Reverting to legacy interrupts. Error: %d\n",
970 oct
->flags
|= LIO_FLAG_MSI_ENABLED
;
972 irqret
= request_irq(oct
->pci_dev
->irq
, liquidio_intr_handler
,
973 IRQF_SHARED
, "octeon", oct
);
975 if (oct
->flags
& LIO_FLAG_MSI_ENABLED
)
976 pci_disable_msi(oct
->pci_dev
);
977 dev_err(&oct
->pci_dev
->dev
, "Request IRQ failed with code: %d\n",
986 * \brief PCI probe handler
987 * @param pdev PCI device structure
990 static int liquidio_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
992 struct octeon_device
*oct_dev
= NULL
;
993 struct handshake
*hs
;
995 oct_dev
= octeon_allocate_device(pdev
->device
,
996 sizeof(struct octeon_device_priv
));
998 dev_err(&pdev
->dev
, "Unable to allocate device\n");
1002 dev_info(&pdev
->dev
, "Initializing device %x:%x.\n",
1003 (u32
)pdev
->vendor
, (u32
)pdev
->device
);
1005 /* Assign octeon_device for this device to the private data area. */
1006 pci_set_drvdata(pdev
, oct_dev
);
1008 /* set linux specific device pointer */
1009 oct_dev
->pci_dev
= (void *)pdev
;
1011 hs
= &handshake
[oct_dev
->octeon_id
];
1012 init_completion(&hs
->init
);
1013 init_completion(&hs
->started
);
1016 if (oct_dev
->octeon_id
== 0)
1017 /* first LiquidIO NIC is detected */
1018 complete(&first_stage
);
1020 if (octeon_device_init(oct_dev
)) {
1021 liquidio_remove(pdev
);
1025 dev_dbg(&oct_dev
->pci_dev
->dev
, "Device is ready\n");
1031 *\brief Destroy resources associated with octeon device
1032 * @param pdev PCI device structure
1035 static void octeon_destroy_resources(struct octeon_device
*oct
)
1038 struct octeon_device_priv
*oct_priv
=
1039 (struct octeon_device_priv
*)oct
->priv
;
1041 struct handshake
*hs
;
1043 switch (atomic_read(&oct
->status
)) {
1044 case OCT_DEV_RUNNING
:
1045 case OCT_DEV_CORE_OK
:
1047 /* No more instructions will be forwarded. */
1048 atomic_set(&oct
->status
, OCT_DEV_IN_RESET
);
1050 oct
->app_mode
= CVM_DRV_INVALID_APP
;
1051 dev_dbg(&oct
->pci_dev
->dev
, "Device state is now %s\n",
1052 lio_get_state_string(&oct
->status
));
1054 schedule_timeout_uninterruptible(HZ
/ 10);
1057 case OCT_DEV_HOST_OK
:
1060 case OCT_DEV_CONSOLE_INIT_DONE
:
1061 /* Remove any consoles */
1062 octeon_remove_consoles(oct
);
1065 case OCT_DEV_IO_QUEUES_DONE
:
1066 if (wait_for_pending_requests(oct
))
1067 dev_err(&oct
->pci_dev
->dev
, "There were pending requests\n");
1069 if (lio_wait_for_instr_fetch(oct
))
1070 dev_err(&oct
->pci_dev
->dev
, "IQ had pending instructions\n");
1072 /* Disable the input and output queues now. No more packets will
1073 * arrive from Octeon, but we should wait for all packet
1074 * processing to finish.
1076 oct
->fn_list
.disable_io_queues(oct
);
1078 if (lio_wait_for_oq_pkts(oct
))
1079 dev_err(&oct
->pci_dev
->dev
, "OQ had pending packets\n");
1081 /* Disable interrupts */
1082 oct
->fn_list
.disable_interrupt(oct
->chip
);
1084 /* Release the interrupt line */
1085 free_irq(oct
->pci_dev
->irq
, oct
);
1087 if (oct
->flags
& LIO_FLAG_MSI_ENABLED
)
1088 pci_disable_msi(oct
->pci_dev
);
1090 /* Soft reset the octeon device before exiting */
1091 oct
->fn_list
.soft_reset(oct
);
1093 /* Disable the device, releasing the PCI INT */
1094 pci_disable_device(oct
->pci_dev
);
1097 case OCT_DEV_IN_RESET
:
1098 case OCT_DEV_DROQ_INIT_DONE
:
1099 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
1101 for (i
= 0; i
< MAX_OCTEON_OUTPUT_QUEUES
; i
++) {
1102 if (!(oct
->io_qmask
.oq
& (1UL << i
)))
1104 octeon_delete_droq(oct
, i
);
1107 /* Force any pending handshakes to complete */
1108 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
1112 handshake
[oct
->octeon_id
].init_ok
= 0;
1113 complete(&handshake
[oct
->octeon_id
].init
);
1114 handshake
[oct
->octeon_id
].started_ok
= 0;
1115 complete(&handshake
[oct
->octeon_id
].started
);
1120 case OCT_DEV_RESP_LIST_INIT_DONE
:
1121 octeon_delete_response_list(oct
);
1124 case OCT_DEV_SC_BUFF_POOL_INIT_DONE
:
1125 octeon_free_sc_buffer_pool(oct
);
1128 case OCT_DEV_INSTR_QUEUE_INIT_DONE
:
1129 for (i
= 0; i
< MAX_OCTEON_INSTR_QUEUES
; i
++) {
1130 if (!(oct
->io_qmask
.iq
& (1UL << i
)))
1132 octeon_delete_instr_queue(oct
, i
);
1136 case OCT_DEV_DISPATCH_INIT_DONE
:
1137 octeon_delete_dispatch_list(oct
);
1138 cancel_delayed_work_sync(&oct
->nic_poll_work
.work
);
1141 case OCT_DEV_PCI_MAP_DONE
:
1142 octeon_unmap_pci_barx(oct
, 0);
1143 octeon_unmap_pci_barx(oct
, 1);
1146 case OCT_DEV_BEGIN_STATE
:
1147 /* Nothing to be done here either */
1149 } /* end switch(oct->status) */
1151 tasklet_kill(&oct_priv
->droq_tasklet
);
1155 * \brief Send Rx control command
1156 * @param lio per-network private data
1157 * @param start_stop whether to start or stop
1159 static void send_rx_ctrl_cmd(struct lio
*lio
, int start_stop
)
1161 struct octnic_ctrl_pkt nctrl
;
1162 struct octnic_ctrl_params nparams
;
1164 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
1166 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_RX_CTL
;
1167 nctrl
.ncmd
.s
.param1
= lio
->linfo
.ifidx
;
1168 nctrl
.ncmd
.s
.param2
= start_stop
;
1169 nctrl
.netpndev
= (u64
)lio
->netdev
;
1171 nparams
.resp_order
= OCTEON_RESP_NORESPONSE
;
1173 if (octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
, nparams
) < 0)
1174 netif_info(lio
, rx_err
, lio
->netdev
, "Failed to send RX Control message\n");
1178 * \brief Destroy NIC device interface
1179 * @param oct octeon device
1180 * @param ifidx which interface to destroy
1182 * Cleanup associated with each interface for an Octeon device when NIC
1183 * module is being unloaded or if initialization fails during load.
1185 static void liquidio_destroy_nic_device(struct octeon_device
*oct
, int ifidx
)
1187 struct net_device
*netdev
= oct
->props
[ifidx
].netdev
;
1191 dev_err(&oct
->pci_dev
->dev
, "%s No netdevice ptr for index %d\n",
1196 lio
= GET_LIO(netdev
);
1198 dev_dbg(&oct
->pci_dev
->dev
, "NIC device cleanup\n");
1200 send_rx_ctrl_cmd(lio
, 0);
1202 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_RUNNING
)
1205 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_REGISTERED
)
1206 unregister_netdev(netdev
);
1210 free_netdev(netdev
);
1212 oct
->props
[ifidx
].netdev
= NULL
;
1216 * \brief Stop complete NIC functionality
1217 * @param oct octeon device
1219 static int liquidio_stop_nic_module(struct octeon_device
*oct
)
1224 dev_dbg(&oct
->pci_dev
->dev
, "Stopping network interfaces\n");
1225 if (!oct
->ifcount
) {
1226 dev_err(&oct
->pci_dev
->dev
, "Init for Octeon was not completed\n");
1230 for (i
= 0; i
< oct
->ifcount
; i
++) {
1231 lio
= GET_LIO(oct
->props
[i
].netdev
);
1232 for (j
= 0; j
< lio
->linfo
.num_rxpciq
; j
++)
1233 octeon_unregister_droq_ops(oct
, lio
->linfo
.rxpciq
[j
]);
1236 for (i
= 0; i
< oct
->ifcount
; i
++)
1237 liquidio_destroy_nic_device(oct
, i
);
1239 dev_dbg(&oct
->pci_dev
->dev
, "Network interfaces stopped\n");
1244 * \brief Cleans up resources at unload time
1245 * @param pdev PCI device structure
1247 static void liquidio_remove(struct pci_dev
*pdev
)
1249 struct octeon_device
*oct_dev
= pci_get_drvdata(pdev
);
1251 dev_dbg(&oct_dev
->pci_dev
->dev
, "Stopping device\n");
1253 if (oct_dev
->app_mode
&& (oct_dev
->app_mode
== CVM_DRV_NIC_APP
))
1254 liquidio_stop_nic_module(oct_dev
);
1256 /* Reset the octeon device and cleanup all memory allocated for
1257 * the octeon device by driver.
1259 octeon_destroy_resources(oct_dev
);
1261 dev_info(&oct_dev
->pci_dev
->dev
, "Device removed\n");
1263 /* This octeon device has been removed. Update the global
1264 * data structure to reflect this. Free the device structure.
1266 octeon_free_device_mem(oct_dev
);
1270 * \brief Identify the Octeon device and to map the BAR address space
1271 * @param oct octeon device
1273 static int octeon_chip_specific_setup(struct octeon_device
*oct
)
1278 pci_read_config_dword(oct
->pci_dev
, 0, &dev_id
);
1279 pci_read_config_dword(oct
->pci_dev
, 8, &rev_id
);
1280 oct
->rev_id
= rev_id
& 0xff;
1283 case OCTEON_CN68XX_PCIID
:
1284 oct
->chip_id
= OCTEON_CN68XX
;
1285 ret
= lio_setup_cn68xx_octeon_device(oct
);
1288 case OCTEON_CN66XX_PCIID
:
1289 oct
->chip_id
= OCTEON_CN66XX
;
1290 ret
= lio_setup_cn66xx_octeon_device(oct
);
1293 dev_err(&oct
->pci_dev
->dev
, "Unknown device found (dev_id: %x)\n",
1298 dev_info(&oct
->pci_dev
->dev
, "CN68XX PASS%d.%d %s\n",
1299 OCTEON_MAJOR_REV(oct
),
1300 OCTEON_MINOR_REV(oct
),
1301 octeon_get_conf(oct
)->card_name
);
1307 * \brief PCI initialization for each Octeon device.
1308 * @param oct octeon device
1310 static int octeon_pci_os_setup(struct octeon_device
*oct
)
1312 /* setup PCI stuff first */
1313 if (pci_enable_device(oct
->pci_dev
)) {
1314 dev_err(&oct
->pci_dev
->dev
, "pci_enable_device failed\n");
1318 if (dma_set_mask_and_coherent(&oct
->pci_dev
->dev
, DMA_BIT_MASK(64))) {
1319 dev_err(&oct
->pci_dev
->dev
, "Unexpected DMA device capability\n");
1323 /* Enable PCI DMA Master. */
1324 pci_set_master(oct
->pci_dev
);
1330 * \brief Check Tx queue state for a given network buffer
1331 * @param lio per-network private data
1332 * @param skb network buffer
1334 static inline int check_txq_state(struct lio
*lio
, struct sk_buff
*skb
)
1338 if (netif_is_multiqueue(lio
->netdev
)) {
1339 q
= skb
->queue_mapping
;
1340 iq
= lio
->linfo
.txpciq
[(q
& (lio
->linfo
.num_txpciq
- 1))];
1345 if (octnet_iq_is_full(lio
->oct_dev
, iq
))
1347 wake_q(lio
->netdev
, q
);
1352 * \brief Unmap and free network buffer
1355 static void free_netbuf(void *buf
)
1357 struct sk_buff
*skb
;
1358 struct octnet_buf_free_info
*finfo
;
1361 finfo
= (struct octnet_buf_free_info
*)buf
;
1365 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
, finfo
->dptr
, skb
->len
,
1368 check_txq_state(lio
, skb
);
1370 recv_buffer_free((struct sk_buff
*)skb
);
1374 * \brief Unmap and free gather buffer
1377 static void free_netsgbuf(void *buf
)
1379 struct octnet_buf_free_info
*finfo
;
1380 struct sk_buff
*skb
;
1382 struct octnic_gather
*g
;
1385 finfo
= (struct octnet_buf_free_info
*)buf
;
1389 frags
= skb_shinfo(skb
)->nr_frags
;
1391 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1392 g
->sg
[0].ptr
[0], (skb
->len
- skb
->data_len
),
1397 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1399 pci_unmap_page((lio
->oct_dev
)->pci_dev
,
1400 g
->sg
[(i
>> 2)].ptr
[(i
& 3)],
1401 frag
->size
, DMA_TO_DEVICE
);
1405 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1406 finfo
->dptr
, g
->sg_size
,
1409 spin_lock(&lio
->lock
);
1410 list_add_tail(&g
->list
, &lio
->glist
);
1411 spin_unlock(&lio
->lock
);
1413 check_txq_state(lio
, skb
); /* mq support: sub-queue state check */
1415 recv_buffer_free((struct sk_buff
*)skb
);
1419 * \brief Unmap and free gather buffer with response
1422 static void free_netsgbuf_with_resp(void *buf
)
1424 struct octeon_soft_command
*sc
;
1425 struct octnet_buf_free_info
*finfo
;
1426 struct sk_buff
*skb
;
1428 struct octnic_gather
*g
;
1431 sc
= (struct octeon_soft_command
*)buf
;
1432 skb
= (struct sk_buff
*)sc
->callback_arg
;
1433 finfo
= (struct octnet_buf_free_info
*)&skb
->cb
;
1437 frags
= skb_shinfo(skb
)->nr_frags
;
1439 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1440 g
->sg
[0].ptr
[0], (skb
->len
- skb
->data_len
),
1445 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1447 pci_unmap_page((lio
->oct_dev
)->pci_dev
,
1448 g
->sg
[(i
>> 2)].ptr
[(i
& 3)],
1449 frag
->size
, DMA_TO_DEVICE
);
1453 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1454 finfo
->dptr
, g
->sg_size
,
1457 spin_lock(&lio
->lock
);
1458 list_add_tail(&g
->list
, &lio
->glist
);
1459 spin_unlock(&lio
->lock
);
1461 /* Don't free the skb yet */
1463 check_txq_state(lio
, skb
);
1467 * \brief Adjust ptp frequency
1468 * @param ptp PTP clock info
1469 * @param ppb how much to adjust by, in parts-per-billion
1471 static int liquidio_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
1473 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1474 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1476 unsigned long flags
;
1477 bool neg_adj
= false;
1484 /* The hardware adds the clock compensation value to the
1485 * PTP clock on every coprocessor clock cycle, so we
1486 * compute the delta in terms of coprocessor clocks.
1488 delta
= (u64
)ppb
<< 32;
1489 do_div(delta
, oct
->coproc_clock_rate
);
1491 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1492 comp
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1497 lio_pci_writeq(oct
, comp
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1498 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1504 * \brief Adjust ptp time
1505 * @param ptp PTP clock info
1506 * @param delta how much to adjust by, in nanosecs
1508 static int liquidio_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
1510 unsigned long flags
;
1511 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1513 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1514 lio
->ptp_adjust
+= delta
;
1515 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1521 * \brief Get hardware clock time, including any adjustment
1522 * @param ptp PTP clock info
1523 * @param ts timespec
1525 static int liquidio_ptp_gettime(struct ptp_clock_info
*ptp
,
1526 struct timespec64
*ts
)
1529 unsigned long flags
;
1530 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1531 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1533 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1534 ns
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_HI
);
1535 ns
+= lio
->ptp_adjust
;
1536 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1538 *ts
= ns_to_timespec64(ns
);
1544 * \brief Set hardware clock time. Reset adjustment
1545 * @param ptp PTP clock info
1546 * @param ts timespec
1548 static int liquidio_ptp_settime(struct ptp_clock_info
*ptp
,
1549 const struct timespec64
*ts
)
1552 unsigned long flags
;
1553 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1554 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1556 ns
= timespec_to_ns(ts
);
1558 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1559 lio_pci_writeq(oct
, ns
, CN6XXX_MIO_PTP_CLOCK_HI
);
1560 lio
->ptp_adjust
= 0;
1561 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1567 * \brief Check if PTP is enabled
1568 * @param ptp PTP clock info
1570 * @param on is it on
1572 static int liquidio_ptp_enable(struct ptp_clock_info
*ptp
,
1573 struct ptp_clock_request
*rq
, int on
)
1579 * \brief Open PTP clock source
1580 * @param netdev network device
1582 static void oct_ptp_open(struct net_device
*netdev
)
1584 struct lio
*lio
= GET_LIO(netdev
);
1585 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1587 spin_lock_init(&lio
->ptp_lock
);
1589 snprintf(lio
->ptp_info
.name
, 16, "%s", netdev
->name
);
1590 lio
->ptp_info
.owner
= THIS_MODULE
;
1591 lio
->ptp_info
.max_adj
= 250000000;
1592 lio
->ptp_info
.n_alarm
= 0;
1593 lio
->ptp_info
.n_ext_ts
= 0;
1594 lio
->ptp_info
.n_per_out
= 0;
1595 lio
->ptp_info
.pps
= 0;
1596 lio
->ptp_info
.adjfreq
= liquidio_ptp_adjfreq
;
1597 lio
->ptp_info
.adjtime
= liquidio_ptp_adjtime
;
1598 lio
->ptp_info
.gettime64
= liquidio_ptp_gettime
;
1599 lio
->ptp_info
.settime64
= liquidio_ptp_settime
;
1600 lio
->ptp_info
.enable
= liquidio_ptp_enable
;
1602 lio
->ptp_adjust
= 0;
1604 lio
->ptp_clock
= ptp_clock_register(&lio
->ptp_info
,
1605 &oct
->pci_dev
->dev
);
1607 if (IS_ERR(lio
->ptp_clock
))
1608 lio
->ptp_clock
= NULL
;
1612 * \brief Init PTP clock
1613 * @param oct octeon device
1615 static void liquidio_ptp_init(struct octeon_device
*oct
)
1617 u64 clock_comp
, cfg
;
1619 clock_comp
= (u64
)NSEC_PER_SEC
<< 32;
1620 do_div(clock_comp
, oct
->coproc_clock_rate
);
1621 lio_pci_writeq(oct
, clock_comp
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1624 cfg
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_CFG
);
1625 lio_pci_writeq(oct
, cfg
| 0x01, CN6XXX_MIO_PTP_CLOCK_CFG
);
1629 * \brief Load firmware to device
1630 * @param oct octeon device
1632 * Maps device to firmware filename, requests firmware, and downloads it
1634 static int load_firmware(struct octeon_device
*oct
)
1637 const struct firmware
*fw
;
1638 char fw_name
[LIO_MAX_FW_FILENAME_LEN
];
1641 if (strncmp(fw_type
, LIO_FW_NAME_TYPE_NONE
,
1642 sizeof(LIO_FW_NAME_TYPE_NONE
)) == 0) {
1643 dev_info(&oct
->pci_dev
->dev
, "Skipping firmware load\n");
1647 if (fw_type
[0] == '\0')
1648 tmp_fw_type
= LIO_FW_NAME_TYPE_NIC
;
1650 tmp_fw_type
= fw_type
;
1652 sprintf(fw_name
, "%s%s%s_%s%s", LIO_FW_DIR
, LIO_FW_BASE_NAME
,
1653 octeon_get_conf(oct
)->card_name
, tmp_fw_type
,
1654 LIO_FW_NAME_SUFFIX
);
1656 ret
= request_firmware(&fw
, fw_name
, &oct
->pci_dev
->dev
);
1658 dev_err(&oct
->pci_dev
->dev
, "Request firmware failed. Could not find file %s.\n.",
1663 ret
= octeon_download_firmware(oct
, fw
->data
, fw
->size
);
1665 release_firmware(fw
);
1671 * \brief Setup output queue
1672 * @param oct octeon device
1673 * @param q_no which queue
1674 * @param num_descs how many descriptors
1675 * @param desc_size size of each descriptor
1676 * @param app_ctx application context
1678 static int octeon_setup_droq(struct octeon_device
*oct
, int q_no
, int num_descs
,
1679 int desc_size
, void *app_ctx
)
1683 dev_dbg(&oct
->pci_dev
->dev
, "Creating Droq: %d\n", q_no
);
1684 /* droq creation and local register settings. */
1685 ret_val
= octeon_create_droq(oct
, q_no
, num_descs
, desc_size
, app_ctx
);
1690 dev_dbg(&oct
->pci_dev
->dev
, "Using default droq %d\n", q_no
);
1693 /* tasklet creation for the droq */
1695 /* Enable the droq queues */
1696 octeon_set_droq_pkt_op(oct
, q_no
, 1);
1698 /* Send Credit for Octeon Output queues. Credits are always
1699 * sent after the output queue is enabled.
1701 writel(oct
->droq
[q_no
]->max_count
,
1702 oct
->droq
[q_no
]->pkts_credit_reg
);
1708 * \brief Callback for getting interface configuration
1709 * @param status status of request
1710 * @param buf pointer to resp structure
1712 static void if_cfg_callback(struct octeon_device
*oct
,
1716 struct octeon_soft_command
*sc
= (struct octeon_soft_command
*)buf
;
1717 struct liquidio_if_cfg_resp
*resp
;
1718 struct liquidio_if_cfg_context
*ctx
;
1720 resp
= (struct liquidio_if_cfg_resp
*)sc
->virtrptr
;
1721 ctx
= (struct liquidio_if_cfg_context
*)sc
->ctxptr
;
1723 oct
= lio_get_device(ctx
->octeon_id
);
1725 dev_err(&oct
->pci_dev
->dev
, "nic if cfg instruction failed. Status: %llx\n",
1726 CVM_CAST64(resp
->status
));
1727 ACCESS_ONCE(ctx
->cond
) = 1;
1729 /* This barrier is required to be sure that the response has been
1730 * written fully before waking up the handler
1734 wake_up_interruptible(&ctx
->wc
);
1738 * \brief Select queue based on hash
1739 * @param dev Net device
1740 * @param skb sk_buff structure
1741 * @returns selected queue number
1743 static u16
select_q(struct net_device
*dev
, struct sk_buff
*skb
,
1744 void *accel_priv
, select_queue_fallback_t fallback
)
1750 /* select queue on chosen queue_mapping or core */
1751 qindex
= skb_rx_queue_recorded(skb
) ?
1752 skb_get_rx_queue(skb
) : smp_processor_id();
1753 return (u16
)(qindex
& (lio
->linfo
.num_txpciq
- 1));
1756 /** Routine to push packets arriving on Octeon interface upto network layer.
1757 * @param oct_id - octeon device id.
1758 * @param skbuff - skbuff struct to be passed to network layer.
1759 * @param len - size of total data received.
1760 * @param rh - Control header associated with the packet
1761 * @param param - additional control data with the packet
1764 liquidio_push_packet(u32 octeon_id
,
1767 union octeon_rh
*rh
,
1770 struct napi_struct
*napi
= param
;
1771 struct octeon_device
*oct
= lio_get_device(octeon_id
);
1772 struct sk_buff
*skb
= (struct sk_buff
*)skbuff
;
1773 struct skb_shared_hwtstamps
*shhwtstamps
;
1775 struct net_device
*netdev
=
1776 (struct net_device
*)oct
->props
[rh
->r_dh
.link
].netdev
;
1777 struct octeon_droq
*droq
= container_of(param
, struct octeon_droq
,
1780 int packet_was_received
;
1781 struct lio
*lio
= GET_LIO(netdev
);
1783 /* Do not proceed if the interface is not in RUNNING state. */
1784 if (!ifstate_check(lio
, LIO_IFSTATE_RUNNING
)) {
1785 recv_buffer_free(skb
);
1786 droq
->stats
.rx_dropped
++;
1792 if (rh
->r_dh
.has_hwtstamp
) {
1793 /* timestamp is included from the hardware at the
1794 * beginning of the packet.
1796 if (ifstate_check(lio
,
1797 LIO_IFSTATE_RX_TIMESTAMP_ENABLED
)) {
1798 /* Nanoseconds are in the first 64-bits
1801 memcpy(&ns
, (skb
->data
), sizeof(ns
));
1802 shhwtstamps
= skb_hwtstamps(skb
);
1803 shhwtstamps
->hwtstamp
=
1804 ns_to_ktime(ns
+ lio
->ptp_adjust
);
1806 skb_pull(skb
, sizeof(ns
));
1809 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1811 if ((netdev
->features
& NETIF_F_RXCSUM
) &&
1812 (rh
->r_dh
.csum_verified
== CNNIC_CSUM_VERIFIED
))
1813 /* checksum has already been verified */
1814 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1816 skb
->ip_summed
= CHECKSUM_NONE
;
1818 packet_was_received
= napi_gro_receive(napi
, skb
) != GRO_DROP
;
1820 if (packet_was_received
) {
1821 droq
->stats
.rx_bytes_received
+= len
;
1822 droq
->stats
.rx_pkts_received
++;
1823 netdev
->last_rx
= jiffies
;
1825 droq
->stats
.rx_dropped
++;
1826 netif_info(lio
, rx_err
, lio
->netdev
,
1827 "droq:%d error rx_dropped:%llu\n",
1828 droq
->q_no
, droq
->stats
.rx_dropped
);
1832 recv_buffer_free(skb
);
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
1850 * \brief callback when receive interrupt occurs and we are in NAPI mode
1851 * @param arg pointer to octeon output queue
1853 static void liquidio_napi_drv_callback(void *arg
)
1855 struct octeon_droq
*droq
= arg
;
1856 int this_cpu
= smp_processor_id();
1858 if (droq
->cpu_id
== this_cpu
) {
1859 napi_schedule(&droq
->napi
);
1861 struct call_single_data
*csd
= &droq
->csd
;
1863 csd
->func
= napi_schedule_wrapper
;
1864 csd
->info
= &droq
->napi
;
1867 smp_call_function_single_async(droq
->cpu_id
, csd
);
1872 * \brief Main NAPI poll function
1873 * @param droq octeon output queue
1874 * @param budget maximum number of items to process
1876 static int liquidio_napi_do_rx(struct octeon_droq
*droq
, int budget
)
1879 struct lio
*lio
= GET_LIO(droq
->napi
.dev
);
1880 struct octeon_device
*oct
= lio
->oct_dev
;
1882 work_done
= octeon_process_droq_poll_cmd(oct
, droq
->q_no
,
1883 POLL_EVENT_PROCESS_PKTS
,
1885 if (work_done
< 0) {
1886 netif_info(lio
, rx_err
, lio
->netdev
,
1887 "Receive work_done < 0, rxq:%d\n", droq
->q_no
);
1888 goto octnet_napi_finish
;
1891 if (work_done
> budget
)
1892 dev_err(&oct
->pci_dev
->dev
, ">>>> %s work_done: %d budget: %d\n",
1893 __func__
, work_done
, budget
);
1898 napi_complete(&droq
->napi
);
1899 octeon_process_droq_poll_cmd(oct
, droq
->q_no
, POLL_EVENT_ENABLE_INTR
,
1905 * \brief Entry point for NAPI polling
1906 * @param napi NAPI structure
1907 * @param budget maximum number of items to process
1909 static int liquidio_napi_poll(struct napi_struct
*napi
, int budget
)
1911 struct octeon_droq
*droq
;
1914 droq
= container_of(napi
, struct octeon_droq
, napi
);
1916 work_done
= liquidio_napi_do_rx(droq
, budget
);
1918 if (work_done
< budget
) {
1919 napi_complete(napi
);
1920 octeon_process_droq_poll_cmd(droq
->oct_dev
, droq
->q_no
,
1921 POLL_EVENT_ENABLE_INTR
, 0);
1929 * \brief Setup input and output queues
1930 * @param octeon_dev octeon device
1931 * @param net_device Net device
1933 * Note: Queues are with respect to the octeon device. Thus
1934 * an input queue is for egress packets, and output queues
1935 * are for ingress packets.
1937 static inline int setup_io_queues(struct octeon_device
*octeon_dev
,
1938 struct net_device
*net_device
)
1940 static int first_time
= 1;
1941 static struct octeon_droq_ops droq_ops
;
1943 static int cpu_id_modulus
;
1944 struct octeon_droq
*droq
;
1945 struct napi_struct
*napi
;
1946 int q
, q_no
, retval
= 0;
1950 lio
= GET_LIO(net_device
);
1953 memset(&droq_ops
, 0, sizeof(struct octeon_droq_ops
));
1955 droq_ops
.fptr
= liquidio_push_packet
;
1957 droq_ops
.poll_mode
= 1;
1958 droq_ops
.napi_fn
= liquidio_napi_drv_callback
;
1960 cpu_id_modulus
= num_present_cpus();
1964 for (q
= 0; q
< lio
->linfo
.num_rxpciq
; q
++) {
1965 q_no
= lio
->linfo
.rxpciq
[q
];
1967 retval
= octeon_setup_droq(octeon_dev
, q_no
,
1968 CFG_GET_NUM_RX_DESCS_NIC_IF
1969 (octeon_get_conf(octeon_dev
),
1971 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
1972 (octeon_get_conf(octeon_dev
),
1975 dev_err(&octeon_dev
->pci_dev
->dev
,
1976 " %s : Runtime DROQ(RxQ) creation failed.\n",
1981 droq
= octeon_dev
->droq
[q_no
];
1983 netif_napi_add(net_device
, napi
, liquidio_napi_poll
, 64);
1985 /* designate a CPU for this droq */
1986 droq
->cpu_id
= cpu_id
;
1988 if (cpu_id
>= cpu_id_modulus
)
1991 octeon_register_droq_ops(octeon_dev
, q_no
, &droq_ops
);
1995 for (q
= 0; q
< lio
->linfo
.num_txpciq
; q
++) {
1996 num_tx_descs
= CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
1999 retval
= octeon_setup_iq(octeon_dev
, lio
->linfo
.txpciq
[q
],
2001 netdev_get_tx_queue(net_device
, q
));
2003 dev_err(&octeon_dev
->pci_dev
->dev
,
2004 " %s : Runtime IQ(TxQ) creation failed.\n",
2014 * \brief Poll routine for checking transmit queue status
2015 * @param work work_struct data structure
2017 static void octnet_poll_check_txq_status(struct work_struct
*work
)
2019 struct cavium_wk
*wk
= (struct cavium_wk
*)work
;
2020 struct lio
*lio
= (struct lio
*)wk
->ctxptr
;
2022 if (!ifstate_check(lio
, LIO_IFSTATE_RUNNING
))
2025 check_txq_status(lio
);
2026 queue_delayed_work(lio
->txq_status_wq
.wq
,
2027 &lio
->txq_status_wq
.wk
.work
, msecs_to_jiffies(1));
2031 * \brief Sets up the txq poll check
2032 * @param netdev network device
2034 static inline void setup_tx_poll_fn(struct net_device
*netdev
)
2036 struct lio
*lio
= GET_LIO(netdev
);
2037 struct octeon_device
*oct
= lio
->oct_dev
;
2039 lio
->txq_status_wq
.wq
= create_workqueue("txq-status");
2040 if (!lio
->txq_status_wq
.wq
) {
2041 dev_err(&oct
->pci_dev
->dev
, "unable to create cavium txq status wq\n");
2044 INIT_DELAYED_WORK(&lio
->txq_status_wq
.wk
.work
,
2045 octnet_poll_check_txq_status
);
2046 lio
->txq_status_wq
.wk
.ctxptr
= lio
;
2047 queue_delayed_work(lio
->txq_status_wq
.wq
,
2048 &lio
->txq_status_wq
.wk
.work
, msecs_to_jiffies(1));
2052 * \brief Net device open for LiquidIO
2053 * @param netdev network device
2055 static int liquidio_open(struct net_device
*netdev
)
2057 struct lio
*lio
= GET_LIO(netdev
);
2058 struct octeon_device
*oct
= lio
->oct_dev
;
2059 struct napi_struct
*napi
, *n
;
2061 list_for_each_entry_safe(napi
, n
, &netdev
->napi_list
, dev_list
)
2064 oct_ptp_open(netdev
);
2066 ifstate_set(lio
, LIO_IFSTATE_RUNNING
);
2067 setup_tx_poll_fn(netdev
);
2070 netif_info(lio
, ifup
, lio
->netdev
, "Interface Open, ready for traffic\n");
2071 try_module_get(THIS_MODULE
);
2073 /* tell Octeon to start forwarding packets to host */
2074 send_rx_ctrl_cmd(lio
, 1);
2076 /* Ready for link status updates */
2079 dev_info(&oct
->pci_dev
->dev
, "%s interface is opened\n",
2086 * \brief Net device stop for LiquidIO
2087 * @param netdev network device
2089 static int liquidio_stop(struct net_device
*netdev
)
2091 struct napi_struct
*napi
, *n
;
2092 struct lio
*lio
= GET_LIO(netdev
);
2093 struct octeon_device
*oct
= lio
->oct_dev
;
2095 netif_info(lio
, ifdown
, lio
->netdev
, "Stopping interface!\n");
2096 /* Inform that netif carrier is down */
2098 lio
->linfo
.link
.s
.status
= 0;
2100 netif_carrier_off(netdev
);
2102 /* tell Octeon to stop forwarding packets to host */
2103 send_rx_ctrl_cmd(lio
, 0);
2105 cancel_delayed_work_sync(&lio
->txq_status_wq
.wk
.work
);
2106 flush_workqueue(lio
->txq_status_wq
.wq
);
2107 destroy_workqueue(lio
->txq_status_wq
.wq
);
2109 if (lio
->ptp_clock
) {
2110 ptp_clock_unregister(lio
->ptp_clock
);
2111 lio
->ptp_clock
= NULL
;
2114 ifstate_reset(lio
, LIO_IFSTATE_RUNNING
);
2116 /* This is a hack that allows DHCP to continue working. */
2117 set_bit(__LINK_STATE_START
, &lio
->netdev
->state
);
2119 list_for_each_entry_safe(napi
, n
, &netdev
->napi_list
, dev_list
)
2124 dev_info(&oct
->pci_dev
->dev
, "%s interface is stopped\n", netdev
->name
);
2125 module_put(THIS_MODULE
);
2130 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr
)
2132 struct octnic_ctrl_pkt
*nctrl
= (struct octnic_ctrl_pkt
*)nctrl_ptr
;
2133 struct net_device
*netdev
= (struct net_device
*)nctrl
->netpndev
;
2134 struct lio
*lio
= GET_LIO(netdev
);
2135 struct octeon_device
*oct
= lio
->oct_dev
;
2137 switch (nctrl
->ncmd
.s
.cmd
) {
2138 case OCTNET_CMD_CHANGE_DEVFLAGS
:
2139 case OCTNET_CMD_SET_MULTI_LIST
:
2142 case OCTNET_CMD_CHANGE_MACADDR
:
2143 /* If command is successful, change the MACADDR. */
2144 netif_info(lio
, probe
, lio
->netdev
, " MACAddr changed to 0x%llx\n",
2145 CVM_CAST64(nctrl
->udd
[0]));
2146 dev_info(&oct
->pci_dev
->dev
, "%s MACAddr changed to 0x%llx\n",
2147 netdev
->name
, CVM_CAST64(nctrl
->udd
[0]));
2148 memcpy(netdev
->dev_addr
, ((u8
*)&nctrl
->udd
[0]) + 2, ETH_ALEN
);
2151 case OCTNET_CMD_CHANGE_MTU
:
2152 /* If command is successful, change the MTU. */
2153 netif_info(lio
, probe
, lio
->netdev
, " MTU Changed from %d to %d\n",
2154 netdev
->mtu
, nctrl
->ncmd
.s
.param2
);
2155 dev_info(&oct
->pci_dev
->dev
, "%s MTU Changed from %d to %d\n",
2156 netdev
->name
, netdev
->mtu
,
2157 nctrl
->ncmd
.s
.param2
);
2158 netdev
->mtu
= nctrl
->ncmd
.s
.param2
;
2161 case OCTNET_CMD_GPIO_ACCESS
:
2162 netif_info(lio
, probe
, lio
->netdev
, "LED Flashing visual identification\n");
2166 case OCTNET_CMD_LRO_ENABLE
:
2167 dev_info(&oct
->pci_dev
->dev
, "%s LRO Enabled\n", netdev
->name
);
2170 case OCTNET_CMD_LRO_DISABLE
:
2171 dev_info(&oct
->pci_dev
->dev
, "%s LRO Disabled\n",
2175 case OCTNET_CMD_VERBOSE_ENABLE
:
2176 dev_info(&oct
->pci_dev
->dev
, "%s LRO Enabled\n", netdev
->name
);
2179 case OCTNET_CMD_VERBOSE_DISABLE
:
2180 dev_info(&oct
->pci_dev
->dev
, "%s LRO Disabled\n",
2184 case OCTNET_CMD_SET_SETTINGS
:
2185 dev_info(&oct
->pci_dev
->dev
, "%s settings changed\n",
2191 dev_err(&oct
->pci_dev
->dev
, "%s Unknown cmd %d\n", __func__
,
2197 * \brief Converts a mask based on net device flags
2198 * @param netdev network device
2200 * This routine generates a octnet_ifflags mask from the net device flags
2201 * received from the OS.
2203 static inline enum octnet_ifflags
get_new_flags(struct net_device
*netdev
)
2205 enum octnet_ifflags f
= OCTNET_IFFLAG_UNICAST
;
2207 if (netdev
->flags
& IFF_PROMISC
)
2208 f
|= OCTNET_IFFLAG_PROMISC
;
2210 if (netdev
->flags
& IFF_ALLMULTI
)
2211 f
|= OCTNET_IFFLAG_ALLMULTI
;
2213 if (netdev
->flags
& IFF_MULTICAST
) {
2214 f
|= OCTNET_IFFLAG_MULTICAST
;
2216 /* Accept all multicast addresses if there are more than we
2219 if (netdev_mc_count(netdev
) > MAX_OCTEON_MULTICAST_ADDR
)
2220 f
|= OCTNET_IFFLAG_ALLMULTI
;
2223 if (netdev
->flags
& IFF_BROADCAST
)
2224 f
|= OCTNET_IFFLAG_BROADCAST
;
2230 * \brief Net device set_multicast_list
2231 * @param netdev network device
2233 static void liquidio_set_mcast_list(struct net_device
*netdev
)
2235 struct lio
*lio
= GET_LIO(netdev
);
2236 struct octeon_device
*oct
= lio
->oct_dev
;
2237 struct octnic_ctrl_pkt nctrl
;
2238 struct octnic_ctrl_params nparams
;
2239 struct netdev_hw_addr
*ha
;
2242 int mc_count
= min(netdev_mc_count(netdev
), MAX_OCTEON_MULTICAST_ADDR
);
2244 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2246 /* Create a ctrl pkt command to be sent to core app. */
2248 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_SET_MULTI_LIST
;
2249 nctrl
.ncmd
.s
.param1
= lio
->linfo
.ifidx
;
2250 nctrl
.ncmd
.s
.param2
= get_new_flags(netdev
);
2251 nctrl
.ncmd
.s
.param3
= mc_count
;
2252 nctrl
.ncmd
.s
.more
= mc_count
;
2253 nctrl
.netpndev
= (u64
)netdev
;
2254 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2256 /* copy all the addresses into the udd */
2259 netdev_for_each_mc_addr(ha
, netdev
) {
2261 memcpy(((u8
*)mc
) + 2, ha
->addr
, ETH_ALEN
);
2262 /* no need to swap bytes */
2264 if (++mc
> &nctrl
.udd
[mc_count
])
2268 /* Apparently, any activity in this call from the kernel has to
2269 * be atomic. So we won't wait for response.
2271 nctrl
.wait_time
= 0;
2273 nparams
.resp_order
= OCTEON_RESP_NORESPONSE
;
2275 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
, nparams
);
2277 dev_err(&oct
->pci_dev
->dev
, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2283 * \brief Net device set_mac_address
2284 * @param netdev network device
2286 static int liquidio_set_mac(struct net_device
*netdev
, void *p
)
2289 struct lio
*lio
= GET_LIO(netdev
);
2290 struct octeon_device
*oct
= lio
->oct_dev
;
2291 struct sockaddr
*addr
= (struct sockaddr
*)p
;
2292 struct octnic_ctrl_pkt nctrl
;
2293 struct octnic_ctrl_params nparams
;
2295 if ((!is_valid_ether_addr(addr
->sa_data
)) ||
2296 (ifstate_check(lio
, LIO_IFSTATE_RUNNING
)))
2297 return -EADDRNOTAVAIL
;
2299 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2302 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_CHANGE_MACADDR
;
2303 nctrl
.ncmd
.s
.param1
= lio
->linfo
.ifidx
;
2304 nctrl
.ncmd
.s
.param2
= 0;
2305 nctrl
.ncmd
.s
.more
= 1;
2306 nctrl
.netpndev
= (u64
)netdev
;
2307 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2308 nctrl
.wait_time
= 100;
2311 /* The MAC Address is presented in network byte order. */
2312 memcpy((u8
*)&nctrl
.udd
[0] + 2, addr
->sa_data
, ETH_ALEN
);
2314 nparams
.resp_order
= OCTEON_RESP_ORDERED
;
2316 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
, nparams
);
2318 dev_err(&oct
->pci_dev
->dev
, "MAC Address change failed\n");
2321 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
2322 memcpy(((u8
*)&lio
->linfo
.hw_addr
) + 2, addr
->sa_data
, ETH_ALEN
);
2328 * \brief Net device get_stats
2329 * @param netdev network device
2331 static struct net_device_stats
*liquidio_get_stats(struct net_device
*netdev
)
2333 struct lio
*lio
= GET_LIO(netdev
);
2334 struct net_device_stats
*stats
= &netdev
->stats
;
2335 struct octeon_device
*oct
;
2336 u64 pkts
= 0, drop
= 0, bytes
= 0;
2337 struct oct_droq_stats
*oq_stats
;
2338 struct oct_iq_stats
*iq_stats
;
2339 int i
, iq_no
, oq_no
;
2343 for (i
= 0; i
< lio
->linfo
.num_txpciq
; i
++) {
2344 iq_no
= lio
->linfo
.txpciq
[i
];
2345 iq_stats
= &oct
->instr_queue
[iq_no
]->stats
;
2346 pkts
+= iq_stats
->tx_done
;
2347 drop
+= iq_stats
->tx_dropped
;
2348 bytes
+= iq_stats
->tx_tot_bytes
;
2351 stats
->tx_packets
= pkts
;
2352 stats
->tx_bytes
= bytes
;
2353 stats
->tx_dropped
= drop
;
2359 for (i
= 0; i
< lio
->linfo
.num_rxpciq
; i
++) {
2360 oq_no
= lio
->linfo
.rxpciq
[i
];
2361 oq_stats
= &oct
->droq
[oq_no
]->stats
;
2362 pkts
+= oq_stats
->rx_pkts_received
;
2363 drop
+= (oq_stats
->rx_dropped
+
2364 oq_stats
->dropped_nodispatch
+
2365 oq_stats
->dropped_toomany
+
2366 oq_stats
->dropped_nomem
);
2367 bytes
+= oq_stats
->rx_bytes_received
;
2370 stats
->rx_bytes
= bytes
;
2371 stats
->rx_packets
= pkts
;
2372 stats
->rx_dropped
= drop
;
2378 * \brief Net device change_mtu
2379 * @param netdev network device
2381 static int liquidio_change_mtu(struct net_device
*netdev
, int new_mtu
)
2383 struct lio
*lio
= GET_LIO(netdev
);
2384 struct octeon_device
*oct
= lio
->oct_dev
;
2385 struct octnic_ctrl_pkt nctrl
;
2386 struct octnic_ctrl_params nparams
;
2387 int max_frm_size
= new_mtu
+ OCTNET_FRM_HEADER_SIZE
;
2390 /* Limit the MTU to make sure the ethernet packets are between 64 bytes
2393 if ((max_frm_size
< OCTNET_MIN_FRM_SIZE
) ||
2394 (max_frm_size
> OCTNET_MAX_FRM_SIZE
)) {
2395 dev_err(&oct
->pci_dev
->dev
, "Invalid MTU: %d\n", new_mtu
);
2396 dev_err(&oct
->pci_dev
->dev
, "Valid range %d and %d\n",
2397 (OCTNET_MIN_FRM_SIZE
- OCTNET_FRM_HEADER_SIZE
),
2398 (OCTNET_MAX_FRM_SIZE
- OCTNET_FRM_HEADER_SIZE
));
2402 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2405 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_CHANGE_MTU
;
2406 nctrl
.ncmd
.s
.param1
= lio
->linfo
.ifidx
;
2407 nctrl
.ncmd
.s
.param2
= new_mtu
;
2408 nctrl
.wait_time
= 100;
2409 nctrl
.netpndev
= (u64
)netdev
;
2410 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2412 nparams
.resp_order
= OCTEON_RESP_ORDERED
;
2414 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
, nparams
);
2416 dev_err(&oct
->pci_dev
->dev
, "Failed to set MTU\n");
2426 * \brief Handler for SIOCSHWTSTAMP ioctl
2427 * @param netdev network device
2428 * @param ifr interface request
2429 * @param cmd command
2431 static int hwtstamp_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
2433 struct hwtstamp_config conf
;
2434 struct lio
*lio
= GET_LIO(netdev
);
2436 if (copy_from_user(&conf
, ifr
->ifr_data
, sizeof(conf
)))
2442 switch (conf
.tx_type
) {
2443 case HWTSTAMP_TX_ON
:
2444 case HWTSTAMP_TX_OFF
:
2450 switch (conf
.rx_filter
) {
2451 case HWTSTAMP_FILTER_NONE
:
2453 case HWTSTAMP_FILTER_ALL
:
2454 case HWTSTAMP_FILTER_SOME
:
2455 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
2456 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
2457 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
2458 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
2459 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
2460 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
2461 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
2462 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
2463 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
2464 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
2465 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
2466 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
2467 conf
.rx_filter
= HWTSTAMP_FILTER_ALL
;
2473 if (conf
.rx_filter
== HWTSTAMP_FILTER_ALL
)
2474 ifstate_set(lio
, LIO_IFSTATE_RX_TIMESTAMP_ENABLED
);
2477 ifstate_reset(lio
, LIO_IFSTATE_RX_TIMESTAMP_ENABLED
);
2479 return copy_to_user(ifr
->ifr_data
, &conf
, sizeof(conf
)) ? -EFAULT
: 0;
2483 * \brief ioctl handler
2484 * @param netdev network device
2485 * @param ifr interface request
2486 * @param cmd command
2488 static int liquidio_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
2492 return hwtstamp_ioctl(netdev
, ifr
, cmd
);
2499 * \brief handle a Tx timestamp response
2500 * @param status response status
2501 * @param buf pointer to skb
2503 static void handle_timestamp(struct octeon_device
*oct
,
2507 struct octnet_buf_free_info
*finfo
;
2508 struct octeon_soft_command
*sc
;
2509 struct oct_timestamp_resp
*resp
;
2511 struct sk_buff
*skb
= (struct sk_buff
*)buf
;
2513 finfo
= (struct octnet_buf_free_info
*)skb
->cb
;
2517 resp
= (struct oct_timestamp_resp
*)sc
->virtrptr
;
2519 if (status
!= OCTEON_REQUEST_DONE
) {
2520 dev_err(&oct
->pci_dev
->dev
, "Tx timestamp instruction failed. Status: %llx\n",
2521 CVM_CAST64(status
));
2522 resp
->timestamp
= 0;
2525 octeon_swap_8B_data(&resp
->timestamp
, 1);
2527 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
) != 0)) {
2528 struct skb_shared_hwtstamps ts
;
2529 u64 ns
= resp
->timestamp
;
2531 netif_info(lio
, tx_done
, lio
->netdev
,
2532 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2533 skb
, (unsigned long long)ns
);
2534 ts
.hwtstamp
= ns_to_ktime(ns
+ lio
->ptp_adjust
);
2535 skb_tstamp_tx(skb
, &ts
);
2538 octeon_free_soft_command(oct
, sc
);
2539 recv_buffer_free(skb
);
2542 /* \brief Send a data packet that will be timestamped
2543 * @param oct octeon device
2544 * @param ndata pointer to network data
2545 * @param finfo pointer to private network data
2547 static inline int send_nic_timestamp_pkt(struct octeon_device
*oct
,
2548 struct octnic_data_pkt
*ndata
,
2549 struct octnet_buf_free_info
*finfo
,
2553 struct octeon_soft_command
*sc
;
2554 struct octeon_instr_ih
*ih
;
2555 struct octeon_instr_rdp
*rdp
;
2561 sc
= octeon_alloc_soft_command_resp(oct
, &ndata
->cmd
,
2562 sizeof(struct oct_timestamp_resp
));
2566 dev_err(&oct
->pci_dev
->dev
, "No memory for timestamped data packet\n");
2567 return IQ_SEND_FAILED
;
2570 if (ndata
->reqtype
== REQTYPE_NORESP_NET
)
2571 ndata
->reqtype
= REQTYPE_RESP_NET
;
2572 else if (ndata
->reqtype
== REQTYPE_NORESP_NET_SG
)
2573 ndata
->reqtype
= REQTYPE_RESP_NET_SG
;
2575 sc
->callback
= handle_timestamp
;
2576 sc
->callback_arg
= finfo
->skb
;
2577 sc
->iq_no
= ndata
->q_no
;
2579 ih
= (struct octeon_instr_ih
*)&sc
->cmd
.ih
;
2580 rdp
= (struct octeon_instr_rdp
*)&sc
->cmd
.rdp
;
2582 ring_doorbell
= !xmit_more
;
2583 retval
= octeon_send_command(oct
, sc
->iq_no
, ring_doorbell
, &sc
->cmd
,
2584 sc
, ih
->dlengsz
, ndata
->reqtype
);
2587 dev_err(&oct
->pci_dev
->dev
, "timestamp data packet failed status: %x\n",
2589 octeon_free_soft_command(oct
, sc
);
2591 netif_info(lio
, tx_queued
, lio
->netdev
, "Queued timestamp packet\n");
2597 static inline int is_ipv4(struct sk_buff
*skb
)
2599 return (skb
->protocol
== htons(ETH_P_IP
)) &&
2600 (ip_hdr(skb
)->version
== 4);
2603 static inline int is_vlan(struct sk_buff
*skb
)
2605 return skb
->protocol
== htons(ETH_P_8021Q
);
2608 static inline int is_ip_fragmented(struct sk_buff
*skb
)
2610 /* The Don't fragment and Reserved flag fields are ignored.
2611 * IP is fragmented if
2612 * - the More fragments bit is set (indicating this IP is a fragment
2613 * with more to follow; the current offset could be 0 ).
2614 * - ths offset field is non-zero.
2616 return (ip_hdr(skb
)->frag_off
& htons(IP_MF
| IP_OFFSET
)) ? 1 : 0;
2619 static inline int is_ipv6(struct sk_buff
*skb
)
2621 return (skb
->protocol
== htons(ETH_P_IPV6
)) &&
2622 (ipv6_hdr(skb
)->version
== 6);
2625 static inline int is_with_extn_hdr(struct sk_buff
*skb
)
2627 return (ipv6_hdr(skb
)->nexthdr
!= IPPROTO_TCP
) &&
2628 (ipv6_hdr(skb
)->nexthdr
!= IPPROTO_UDP
);
2631 static inline int is_tcpudp(struct sk_buff
*skb
)
2633 return (ip_hdr(skb
)->protocol
== IPPROTO_TCP
) ||
2634 (ip_hdr(skb
)->protocol
== IPPROTO_UDP
);
2637 static inline u32
get_ipv4_5tuple_tag(struct sk_buff
*skb
)
2640 struct iphdr
*iphdr
= ip_hdr(skb
);
2642 tag
= crc32(0, &iphdr
->protocol
, 1);
2643 tag
= crc32(tag
, (u8
*)&iphdr
->saddr
, 8);
2644 tag
= crc32(tag
, skb_transport_header(skb
), 4);
2648 static inline u32
get_ipv6_5tuple_tag(struct sk_buff
*skb
)
2651 struct ipv6hdr
*ipv6hdr
= ipv6_hdr(skb
);
2653 tag
= crc32(0, &ipv6hdr
->nexthdr
, 1);
2654 tag
= crc32(tag
, (u8
*)&ipv6hdr
->saddr
, 32);
2655 tag
= crc32(tag
, skb_transport_header(skb
), 4);
2659 /** \brief Transmit networks packets to the Octeon interface
2660 * @param skbuff skbuff struct to be passed to network layer.
2661 * @param netdev pointer to network device
2662 * @returns whether the packet was transmitted to the device okay or not
2663 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2665 static int liquidio_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
2668 struct octnet_buf_free_info
*finfo
;
2669 union octnic_cmd_setup cmdsetup
;
2670 struct octnic_data_pkt ndata
;
2671 struct octeon_device
*oct
;
2672 struct oct_iq_stats
*stats
;
2673 int cpu
= 0, status
= 0;
2674 int q_idx
= 0, iq_no
= 0;
2678 lio
= GET_LIO(netdev
);
2681 if (netif_is_multiqueue(netdev
)) {
2682 cpu
= skb
->queue_mapping
;
2683 q_idx
= (cpu
& (lio
->linfo
.num_txpciq
- 1));
2684 iq_no
= lio
->linfo
.txpciq
[q_idx
];
2689 stats
= &oct
->instr_queue
[iq_no
]->stats
;
2691 /* Check for all conditions in which the current packet cannot be
2694 if (!(atomic_read(&lio
->ifstate
) & LIO_IFSTATE_RUNNING
) ||
2695 (!lio
->linfo
.link
.s
.status
) ||
2697 netif_info(lio
, tx_err
, lio
->netdev
,
2698 "Transmit failed link_status : %d\n",
2699 lio
->linfo
.link
.s
.status
);
2700 goto lio_xmit_failed
;
2703 /* Use space in skb->cb to store info used to unmap and
2706 finfo
= (struct octnet_buf_free_info
*)skb
->cb
;
2711 /* Prepare the attributes for the data to be passed to OSI. */
2712 memset(&ndata
, 0, sizeof(struct octnic_data_pkt
));
2714 ndata
.buf
= (void *)finfo
;
2718 if (netif_is_multiqueue(netdev
)) {
2719 if (octnet_iq_is_full(oct
, ndata
.q_no
)) {
2720 /* defer sending if queue is full */
2721 netif_info(lio
, tx_err
, lio
->netdev
, "Transmit failed iq:%d full\n",
2723 stats
->tx_iq_busy
++;
2724 return NETDEV_TX_BUSY
;
2727 if (octnet_iq_is_full(oct
, lio
->txq
)) {
2728 /* defer sending if queue is full */
2729 stats
->tx_iq_busy
++;
2730 netif_info(lio
, tx_err
, lio
->netdev
, "Transmit failed iq:%d full\n",
2732 return NETDEV_TX_BUSY
;
2735 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2736 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
2739 ndata
.datasize
= skb
->len
;
2742 cmdsetup
.s
.ifidx
= lio
->linfo
.ifidx
;
2744 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
2745 if (is_ipv4(skb
) && !is_ip_fragmented(skb
) && is_tcpudp(skb
)) {
2746 tag
= get_ipv4_5tuple_tag(skb
);
2748 cmdsetup
.s
.cksum_offset
= sizeof(struct ethhdr
) + 1;
2750 if (ip_hdr(skb
)->ihl
> 5)
2751 cmdsetup
.s
.ipv4opts_ipv6exthdr
=
2752 OCT_PKT_PARAM_IPV4OPTS
;
2754 } else if (is_ipv6(skb
)) {
2755 tag
= get_ipv6_5tuple_tag(skb
);
2757 cmdsetup
.s
.cksum_offset
= sizeof(struct ethhdr
) + 1;
2759 if (is_with_extn_hdr(skb
))
2760 cmdsetup
.s
.ipv4opts_ipv6exthdr
=
2761 OCT_PKT_PARAM_IPV6EXTHDR
;
2763 } else if (is_vlan(skb
)) {
2764 if (vlan_eth_hdr(skb
)->h_vlan_encapsulated_proto
2765 == htons(ETH_P_IP
) &&
2766 !is_ip_fragmented(skb
) && is_tcpudp(skb
)) {
2767 tag
= get_ipv4_5tuple_tag(skb
);
2769 cmdsetup
.s
.cksum_offset
=
2770 sizeof(struct vlan_ethhdr
) + 1;
2772 if (ip_hdr(skb
)->ihl
> 5)
2773 cmdsetup
.s
.ipv4opts_ipv6exthdr
=
2774 OCT_PKT_PARAM_IPV4OPTS
;
2776 } else if (vlan_eth_hdr(skb
)->h_vlan_encapsulated_proto
2777 == htons(ETH_P_IPV6
)) {
2778 tag
= get_ipv6_5tuple_tag(skb
);
2780 cmdsetup
.s
.cksum_offset
=
2781 sizeof(struct vlan_ethhdr
) + 1;
2783 if (is_with_extn_hdr(skb
))
2784 cmdsetup
.s
.ipv4opts_ipv6exthdr
=
2785 OCT_PKT_PARAM_IPV6EXTHDR
;
2789 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) {
2790 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
2791 cmdsetup
.s
.timestamp
= 1;
2794 if (skb_shinfo(skb
)->nr_frags
== 0) {
2795 cmdsetup
.s
.u
.datasize
= skb
->len
;
2796 octnet_prepare_pci_cmd(&ndata
.cmd
, &cmdsetup
, tag
);
2797 /* Offload checksum calculation for TCP/UDP packets */
2798 ndata
.cmd
.dptr
= dma_map_single(&oct
->pci_dev
->dev
,
2802 if (dma_mapping_error(&oct
->pci_dev
->dev
, ndata
.cmd
.dptr
)) {
2803 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 1\n",
2805 return NETDEV_TX_BUSY
;
2808 finfo
->dptr
= ndata
.cmd
.dptr
;
2810 ndata
.reqtype
= REQTYPE_NORESP_NET
;
2814 struct skb_frag_struct
*frag
;
2815 struct octnic_gather
*g
;
2817 spin_lock(&lio
->lock
);
2818 g
= (struct octnic_gather
*)list_delete_head(&lio
->glist
);
2819 spin_unlock(&lio
->lock
);
2822 netif_info(lio
, tx_err
, lio
->netdev
,
2823 "Transmit scatter gather: glist null!\n");
2824 goto lio_xmit_failed
;
2827 cmdsetup
.s
.gather
= 1;
2828 cmdsetup
.s
.u
.gatherptrs
= (skb_shinfo(skb
)->nr_frags
+ 1);
2829 octnet_prepare_pci_cmd(&ndata
.cmd
, &cmdsetup
, tag
);
2831 memset(g
->sg
, 0, g
->sg_size
);
2833 g
->sg
[0].ptr
[0] = dma_map_single(&oct
->pci_dev
->dev
,
2835 (skb
->len
- skb
->data_len
),
2837 if (dma_mapping_error(&oct
->pci_dev
->dev
, g
->sg
[0].ptr
[0])) {
2838 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 2\n",
2840 return NETDEV_TX_BUSY
;
2842 add_sg_size(&g
->sg
[0], (skb
->len
- skb
->data_len
), 0);
2844 frags
= skb_shinfo(skb
)->nr_frags
;
2847 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
2849 g
->sg
[(i
>> 2)].ptr
[(i
& 3)] =
2850 dma_map_page(&oct
->pci_dev
->dev
,
2856 add_sg_size(&g
->sg
[(i
>> 2)], frag
->size
, (i
& 3));
2860 ndata
.cmd
.dptr
= dma_map_single(&oct
->pci_dev
->dev
,
2863 if (dma_mapping_error(&oct
->pci_dev
->dev
, ndata
.cmd
.dptr
)) {
2864 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 3\n",
2866 dma_unmap_single(&oct
->pci_dev
->dev
, g
->sg
[0].ptr
[0],
2867 skb
->len
- skb
->data_len
,
2869 return NETDEV_TX_BUSY
;
2872 finfo
->dptr
= ndata
.cmd
.dptr
;
2875 ndata
.reqtype
= REQTYPE_NORESP_NET_SG
;
2878 if (skb_shinfo(skb
)->gso_size
) {
2879 struct octeon_instr_irh
*irh
=
2880 (struct octeon_instr_irh
*)&ndata
.cmd
.irh
;
2881 union tx_info
*tx_info
= (union tx_info
*)&ndata
.cmd
.ossp
[0];
2883 irh
->len
= 1; /* to indicate that ossp[0] contains tx_info */
2884 tx_info
->s
.gso_size
= skb_shinfo(skb
)->gso_size
;
2885 tx_info
->s
.gso_segs
= skb_shinfo(skb
)->gso_segs
;
2888 xmit_more
= skb
->xmit_more
;
2890 if (unlikely(cmdsetup
.s
.timestamp
))
2891 status
= send_nic_timestamp_pkt(oct
, &ndata
, finfo
, xmit_more
);
2893 status
= octnet_send_nic_data_pkt(oct
, &ndata
, xmit_more
);
2894 if (status
== IQ_SEND_FAILED
)
2895 goto lio_xmit_failed
;
2897 netif_info(lio
, tx_queued
, lio
->netdev
, "Transmit queued successfully\n");
2899 if (status
== IQ_SEND_STOP
)
2900 stop_q(lio
->netdev
, q_idx
);
2902 netdev
->trans_start
= jiffies
;
2905 stats
->tx_tot_bytes
+= skb
->len
;
2907 return NETDEV_TX_OK
;
2910 stats
->tx_dropped
++;
2911 netif_info(lio
, tx_err
, lio
->netdev
, "IQ%d Transmit dropped:%llu\n",
2912 iq_no
, stats
->tx_dropped
);
2913 dma_unmap_single(&oct
->pci_dev
->dev
, ndata
.cmd
.dptr
,
2914 ndata
.datasize
, DMA_TO_DEVICE
);
2915 recv_buffer_free(skb
);
2916 return NETDEV_TX_OK
;
2919 /** \brief Network device Tx timeout
2920 * @param netdev pointer to network device
2922 static void liquidio_tx_timeout(struct net_device
*netdev
)
2926 lio
= GET_LIO(netdev
);
2928 netif_info(lio
, tx_err
, lio
->netdev
,
2929 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2930 netdev
->stats
.tx_dropped
);
2931 netdev
->trans_start
= jiffies
;
2935 int liquidio_set_feature(struct net_device
*netdev
, int cmd
)
2937 struct lio
*lio
= GET_LIO(netdev
);
2938 struct octeon_device
*oct
= lio
->oct_dev
;
2939 struct octnic_ctrl_pkt nctrl
;
2940 struct octnic_ctrl_params nparams
;
2943 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2946 nctrl
.ncmd
.s
.cmd
= cmd
;
2947 nctrl
.ncmd
.s
.param1
= lio
->linfo
.ifidx
;
2948 nctrl
.ncmd
.s
.param2
= OCTNIC_LROIPV4
| OCTNIC_LROIPV6
;
2949 nctrl
.wait_time
= 100;
2950 nctrl
.netpndev
= (u64
)netdev
;
2951 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2953 nparams
.resp_order
= OCTEON_RESP_NORESPONSE
;
2955 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
, nparams
);
2957 dev_err(&oct
->pci_dev
->dev
, "Feature change failed in core (ret: 0x%x)\n",
2963 /** \brief Net device fix features
2964 * @param netdev pointer to network device
2965 * @param request features requested
2966 * @returns updated features list
2968 static netdev_features_t
liquidio_fix_features(struct net_device
*netdev
,
2969 netdev_features_t request
)
2971 struct lio
*lio
= netdev_priv(netdev
);
2973 if ((request
& NETIF_F_RXCSUM
) &&
2974 !(lio
->dev_capability
& NETIF_F_RXCSUM
))
2975 request
&= ~NETIF_F_RXCSUM
;
2977 if ((request
& NETIF_F_HW_CSUM
) &&
2978 !(lio
->dev_capability
& NETIF_F_HW_CSUM
))
2979 request
&= ~NETIF_F_HW_CSUM
;
2981 if ((request
& NETIF_F_TSO
) && !(lio
->dev_capability
& NETIF_F_TSO
))
2982 request
&= ~NETIF_F_TSO
;
2984 if ((request
& NETIF_F_TSO6
) && !(lio
->dev_capability
& NETIF_F_TSO6
))
2985 request
&= ~NETIF_F_TSO6
;
2987 if ((request
& NETIF_F_LRO
) && !(lio
->dev_capability
& NETIF_F_LRO
))
2988 request
&= ~NETIF_F_LRO
;
2990 /*Disable LRO if RXCSUM is off */
2991 if (!(request
& NETIF_F_RXCSUM
) && (netdev
->features
& NETIF_F_LRO
) &&
2992 (lio
->dev_capability
& NETIF_F_LRO
))
2993 request
&= ~NETIF_F_LRO
;
2998 /** \brief Net device set features
2999 * @param netdev pointer to network device
3000 * @param features features to enable/disable
3002 static int liquidio_set_features(struct net_device
*netdev
,
3003 netdev_features_t features
)
3005 struct lio
*lio
= netdev_priv(netdev
);
3007 if (!((netdev
->features
^ features
) & NETIF_F_LRO
))
3010 if ((features
& NETIF_F_LRO
) && (lio
->dev_capability
& NETIF_F_LRO
))
3011 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_ENABLE
);
3012 else if (!(features
& NETIF_F_LRO
) &&
3013 (lio
->dev_capability
& NETIF_F_LRO
))
3014 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_DISABLE
);
3019 static struct net_device_ops lionetdevops
= {
3020 .ndo_open
= liquidio_open
,
3021 .ndo_stop
= liquidio_stop
,
3022 .ndo_start_xmit
= liquidio_xmit
,
3023 .ndo_get_stats
= liquidio_get_stats
,
3024 .ndo_set_mac_address
= liquidio_set_mac
,
3025 .ndo_set_rx_mode
= liquidio_set_mcast_list
,
3026 .ndo_tx_timeout
= liquidio_tx_timeout
,
3027 .ndo_change_mtu
= liquidio_change_mtu
,
3028 .ndo_do_ioctl
= liquidio_ioctl
,
3029 .ndo_fix_features
= liquidio_fix_features
,
3030 .ndo_set_features
= liquidio_set_features
,
3033 /** \brief Entry point for the liquidio module
3035 static int __init
liquidio_init(void)
3038 struct handshake
*hs
;
3040 init_completion(&first_stage
);
3042 octeon_init_device_list(conf_type
);
3044 if (liquidio_init_pci())
3047 wait_for_completion_timeout(&first_stage
, msecs_to_jiffies(1000));
3049 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
3052 wait_for_completion(&hs
->init
);
3054 /* init handshake failed */
3055 dev_err(&hs
->pci_dev
->dev
,
3056 "Failed to init device\n");
3057 liquidio_deinit_pci();
3063 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
3066 wait_for_completion_timeout(&hs
->started
,
3067 msecs_to_jiffies(30000));
3068 if (!hs
->started_ok
) {
3069 /* starter handshake failed */
3070 dev_err(&hs
->pci_dev
->dev
,
3071 "Firmware failed to start\n");
3072 liquidio_deinit_pci();
3081 static int lio_nic_info(struct octeon_recv_info
*recv_info
, void *buf
)
3083 struct octeon_device
*oct
= (struct octeon_device
*)buf
;
3084 struct octeon_recv_pkt
*recv_pkt
= recv_info
->recv_pkt
;
3086 union oct_link_status
*ls
;
3089 if ((recv_pkt
->buffer_size
[0] != sizeof(*ls
)) ||
3090 (recv_pkt
->rh
.r_nic_info
.ifidx
> oct
->ifcount
)) {
3091 dev_err(&oct
->pci_dev
->dev
, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3092 recv_pkt
->buffer_size
[0],
3093 recv_pkt
->rh
.r_nic_info
.ifidx
);
3097 ifidx
= recv_pkt
->rh
.r_nic_info
.ifidx
;
3098 ls
= (union oct_link_status
*)get_rbd(recv_pkt
->buffer_ptr
[0]);
3100 octeon_swap_8B_data((u64
*)ls
, (sizeof(union oct_link_status
)) >> 3);
3102 update_link_status(oct
->props
[ifidx
].netdev
, ls
);
3105 for (i
= 0; i
< recv_pkt
->buffer_count
; i
++)
3106 recv_buffer_free(recv_pkt
->buffer_ptr
[i
]);
3107 octeon_free_recv_info(recv_info
);
3112 * \brief Setup network interfaces
3113 * @param octeon_dev octeon device
3115 * Called during init time for each device. It assumes the NIC
3116 * is already up and running. The link information for each
3117 * interface is passed in link_info.
3119 static int setup_nic_devices(struct octeon_device
*octeon_dev
)
3121 struct lio
*lio
= NULL
;
3122 struct net_device
*netdev
;
3124 struct octeon_soft_command
*sc
;
3125 struct liquidio_if_cfg_context
*ctx
;
3126 struct liquidio_if_cfg_resp
*resp
;
3127 struct octdev_props
*props
;
3128 int retval
, num_iqueues
, num_oqueues
, q_no
;
3130 int num_cpus
= num_online_cpus();
3131 union oct_nic_if_cfg if_cfg
;
3132 unsigned int base_queue
;
3133 unsigned int gmx_port_id
;
3134 u32 resp_size
, ctx_size
;
3136 /* This is to handle link status changes */
3137 octeon_register_dispatch_fn(octeon_dev
, OPCODE_NIC
,
3139 lio_nic_info
, octeon_dev
);
3141 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3142 * They are handled directly.
3144 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_NORESP_NET
,
3147 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_NORESP_NET_SG
,
3150 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_RESP_NET_SG
,
3151 free_netsgbuf_with_resp
);
3153 for (i
= 0; i
< octeon_dev
->ifcount
; i
++) {
3154 resp_size
= sizeof(struct liquidio_if_cfg_resp
);
3155 ctx_size
= sizeof(struct liquidio_if_cfg_context
);
3156 sc
= (struct octeon_soft_command
*)
3157 octeon_alloc_soft_command(octeon_dev
, 0,
3158 resp_size
, ctx_size
);
3159 resp
= (struct liquidio_if_cfg_resp
*)sc
->virtrptr
;
3160 ctx
= (struct liquidio_if_cfg_context
*)sc
->ctxptr
;
3163 CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3165 CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3167 CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3169 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3170 if (num_iqueues
> num_cpus
)
3171 num_iqueues
= num_cpus
;
3172 if (num_oqueues
> num_cpus
)
3173 num_oqueues
= num_cpus
;
3174 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3175 "requesting config for interface %d, iqs %d, oqs %d\n",
3176 i
, num_iqueues
, num_oqueues
);
3177 ACCESS_ONCE(ctx
->cond
) = 0;
3178 ctx
->octeon_id
= lio_get_device_id(octeon_dev
);
3179 init_waitqueue_head(&ctx
->wc
);
3182 if_cfg
.s
.num_iqueues
= num_iqueues
;
3183 if_cfg
.s
.num_oqueues
= num_oqueues
;
3184 if_cfg
.s
.base_queue
= base_queue
;
3185 if_cfg
.s
.gmx_port_id
= gmx_port_id
;
3186 octeon_prepare_soft_command(octeon_dev
, sc
, OPCODE_NIC
,
3187 OPCODE_NIC_IF_CFG
, i
,
3190 sc
->callback
= if_cfg_callback
;
3191 sc
->callback_arg
= sc
;
3192 sc
->wait_time
= 1000;
3194 retval
= octeon_send_soft_command(octeon_dev
, sc
);
3196 dev_err(&octeon_dev
->pci_dev
->dev
,
3197 "iq/oq config failed status: %x\n",
3199 /* Soft instr is freed by driver in case of failure. */
3200 goto setup_nic_dev_fail
;
3203 /* Sleep on a wait queue till the cond flag indicates that the
3204 * response arrived or timed-out.
3206 sleep_cond(&ctx
->wc
, &ctx
->cond
);
3207 retval
= resp
->status
;
3209 dev_err(&octeon_dev
->pci_dev
->dev
, "iq/oq config failed\n");
3210 goto setup_nic_dev_fail
;
3213 octeon_swap_8B_data((u64
*)(&resp
->cfg_info
),
3214 (sizeof(struct liquidio_if_cfg_info
)) >> 3);
3216 num_iqueues
= hweight64(resp
->cfg_info
.iqmask
);
3217 num_oqueues
= hweight64(resp
->cfg_info
.oqmask
);
3219 if (!(num_iqueues
) || !(num_oqueues
)) {
3220 dev_err(&octeon_dev
->pci_dev
->dev
,
3221 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3222 resp
->cfg_info
.iqmask
,
3223 resp
->cfg_info
.oqmask
);
3224 goto setup_nic_dev_fail
;
3226 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3227 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3228 i
, resp
->cfg_info
.iqmask
, resp
->cfg_info
.oqmask
,
3229 num_iqueues
, num_oqueues
);
3230 netdev
= alloc_etherdev_mq(LIO_SIZE
, num_iqueues
);
3233 dev_err(&octeon_dev
->pci_dev
->dev
, "Device allocation failed\n");
3234 goto setup_nic_dev_fail
;
3237 props
= &octeon_dev
->props
[i
];
3238 props
->netdev
= netdev
;
3240 if (num_iqueues
> 1)
3241 lionetdevops
.ndo_select_queue
= select_q
;
3243 /* Associate the routines that will handle different
3246 netdev
->netdev_ops
= &lionetdevops
;
3248 lio
= GET_LIO(netdev
);
3250 memset(lio
, 0, sizeof(struct lio
));
3252 lio
->linfo
.ifidx
= resp
->cfg_info
.ifidx
;
3253 lio
->ifidx
= resp
->cfg_info
.ifidx
;
3255 lio
->linfo
.num_rxpciq
= num_oqueues
;
3256 lio
->linfo
.num_txpciq
= num_iqueues
;
3257 q_mask
= resp
->cfg_info
.oqmask
;
3258 /* q_mask is 0-based and already verified mask is nonzero */
3259 for (j
= 0; j
< num_oqueues
; j
++) {
3260 q_no
= __ffs64(q_mask
);
3261 q_mask
&= (~(1UL << q_no
));
3262 lio
->linfo
.rxpciq
[j
] = q_no
;
3264 q_mask
= resp
->cfg_info
.iqmask
;
3265 for (j
= 0; j
< num_iqueues
; j
++) {
3266 q_no
= __ffs64(q_mask
);
3267 q_mask
&= (~(1UL << q_no
));
3268 lio
->linfo
.txpciq
[j
] = q_no
;
3270 lio
->linfo
.hw_addr
= resp
->cfg_info
.linfo
.hw_addr
;
3271 lio
->linfo
.gmxport
= resp
->cfg_info
.linfo
.gmxport
;
3272 lio
->linfo
.link
.u64
= resp
->cfg_info
.linfo
.link
.u64
;
3274 lio
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
3276 lio
->dev_capability
= NETIF_F_HIGHDMA
3277 | NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
3278 | NETIF_F_SG
| NETIF_F_RXCSUM
3279 | NETIF_F_TSO
| NETIF_F_TSO6
3281 netif_set_gso_max_size(netdev
, OCTNIC_GSO_MAX_SIZE
);
3283 netdev
->features
= lio
->dev_capability
;
3284 netdev
->vlan_features
= lio
->dev_capability
;
3286 netdev
->hw_features
= lio
->dev_capability
;
3288 /* Point to the properties for octeon device to which this
3289 * interface belongs.
3291 lio
->oct_dev
= octeon_dev
;
3292 lio
->octprops
= props
;
3293 lio
->netdev
= netdev
;
3294 spin_lock_init(&lio
->lock
);
3296 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3297 "if%d gmx: %d hw_addr: 0x%llx\n", i
,
3298 lio
->linfo
.gmxport
, CVM_CAST64(lio
->linfo
.hw_addr
));
3300 /* 64-bit swap required on LE machines */
3301 octeon_swap_8B_data(&lio
->linfo
.hw_addr
, 1);
3302 for (j
= 0; j
< 6; j
++)
3303 mac
[j
] = *((u8
*)(((u8
*)&lio
->linfo
.hw_addr
) + 2 + j
));
3305 /* Copy MAC Address to OS network device structure */
3307 ether_addr_copy(netdev
->dev_addr
, mac
);
3309 if (setup_io_queues(octeon_dev
, netdev
)) {
3310 dev_err(&octeon_dev
->pci_dev
->dev
, "I/O queues creation failed\n");
3311 goto setup_nic_dev_fail
;
3314 ifstate_set(lio
, LIO_IFSTATE_DROQ_OPS
);
3316 /* By default all interfaces on a single Octeon uses the same
3319 lio
->txq
= lio
->linfo
.txpciq
[0];
3320 lio
->rxq
= lio
->linfo
.rxpciq
[0];
3322 lio
->tx_qsize
= octeon_get_tx_qsize(octeon_dev
, lio
->txq
);
3323 lio
->rx_qsize
= octeon_get_rx_qsize(octeon_dev
, lio
->rxq
);
3325 if (setup_glist(lio
)) {
3326 dev_err(&octeon_dev
->pci_dev
->dev
,
3327 "Gather list allocation failed\n");
3328 goto setup_nic_dev_fail
;
3331 /* Register ethtool support */
3332 liquidio_set_ethtool_ops(netdev
);
3334 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_ENABLE
);
3336 if ((debug
!= -1) && (debug
& NETIF_MSG_HW
))
3337 liquidio_set_feature(netdev
, OCTNET_CMD_VERBOSE_ENABLE
);
3339 /* Register the network device with the OS */
3340 if (register_netdev(netdev
)) {
3341 dev_err(&octeon_dev
->pci_dev
->dev
, "Device registration failed\n");
3342 goto setup_nic_dev_fail
;
3345 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3346 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3347 i
, mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5]);
3348 netif_carrier_off(netdev
);
3350 if (lio
->linfo
.link
.s
.status
) {
3351 netif_carrier_on(netdev
);
3354 netif_carrier_off(netdev
);
3357 ifstate_set(lio
, LIO_IFSTATE_REGISTERED
);
3359 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3360 "NIC ifidx:%d Setup successful\n", i
);
3362 octeon_free_soft_command(octeon_dev
, sc
);
3369 octeon_free_soft_command(octeon_dev
, sc
);
3372 dev_err(&octeon_dev
->pci_dev
->dev
,
3373 "NIC ifidx:%d Setup failed\n", i
);
3374 liquidio_destroy_nic_device(octeon_dev
, i
);
3380 * \brief initialize the NIC
3381 * @param oct octeon device
3383 * This initialization routine is called once the Octeon device application is
3386 static int liquidio_init_nic_module(struct octeon_device
*oct
)
3388 struct oct_intrmod_cfg
*intrmod_cfg
;
3390 int num_nic_ports
= CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct
));
3392 dev_dbg(&oct
->pci_dev
->dev
, "Initializing network interfaces\n");
3394 /* only default iq and oq were initialized
3395 * initialize the rest as well
3397 /* run port_config command for each port */
3398 oct
->ifcount
= num_nic_ports
;
3400 memset(oct
->props
, 0,
3401 sizeof(struct octdev_props
) * num_nic_ports
);
3403 retval
= setup_nic_devices(oct
);
3405 dev_err(&oct
->pci_dev
->dev
, "Setup NIC devices failed\n");
3406 goto octnet_init_failure
;
3409 liquidio_ptp_init(oct
);
3411 /* Initialize interrupt moderation params */
3412 intrmod_cfg
= &((struct octeon_device
*)oct
)->intrmod
;
3413 intrmod_cfg
->intrmod_enable
= 1;
3414 intrmod_cfg
->intrmod_check_intrvl
= LIO_INTRMOD_CHECK_INTERVAL
;
3415 intrmod_cfg
->intrmod_maxpkt_ratethr
= LIO_INTRMOD_MAXPKT_RATETHR
;
3416 intrmod_cfg
->intrmod_minpkt_ratethr
= LIO_INTRMOD_MINPKT_RATETHR
;
3417 intrmod_cfg
->intrmod_maxcnt_trigger
= LIO_INTRMOD_MAXCNT_TRIGGER
;
3418 intrmod_cfg
->intrmod_maxtmr_trigger
= LIO_INTRMOD_MAXTMR_TRIGGER
;
3419 intrmod_cfg
->intrmod_mintmr_trigger
= LIO_INTRMOD_MINTMR_TRIGGER
;
3420 intrmod_cfg
->intrmod_mincnt_trigger
= LIO_INTRMOD_MINCNT_TRIGGER
;
3422 dev_dbg(&oct
->pci_dev
->dev
, "Network interfaces ready\n");
3426 octnet_init_failure
:
3434 * \brief starter callback that invokes the remaining initialization work after
3435 * the NIC is up and running.
3436 * @param octptr work struct work_struct
3438 static void nic_starter(struct work_struct
*work
)
3440 struct octeon_device
*oct
;
3441 struct cavium_wk
*wk
= (struct cavium_wk
*)work
;
3443 oct
= (struct octeon_device
*)wk
->ctxptr
;
3445 if (atomic_read(&oct
->status
) == OCT_DEV_RUNNING
)
3448 /* If the status of the device is CORE_OK, the core
3449 * application has reported its application type. Call
3450 * any registered handlers now and move to the RUNNING
3453 if (atomic_read(&oct
->status
) != OCT_DEV_CORE_OK
) {
3454 schedule_delayed_work(&oct
->nic_poll_work
.work
,
3455 LIQUIDIO_STARTER_POLL_INTERVAL_MS
);
3459 atomic_set(&oct
->status
, OCT_DEV_RUNNING
);
3461 if (oct
->app_mode
&& oct
->app_mode
== CVM_DRV_NIC_APP
) {
3462 dev_dbg(&oct
->pci_dev
->dev
, "Starting NIC module\n");
3464 if (liquidio_init_nic_module(oct
))
3465 dev_err(&oct
->pci_dev
->dev
, "NIC initialization failed\n");
3467 handshake
[oct
->octeon_id
].started_ok
= 1;
3469 dev_err(&oct
->pci_dev
->dev
,
3470 "Unexpected application running on NIC (%d). Check firmware.\n",
3474 complete(&handshake
[oct
->octeon_id
].started
);
3478 * \brief Device initialization for each Octeon device that is probed
3479 * @param octeon_dev octeon device
3481 static int octeon_device_init(struct octeon_device
*octeon_dev
)
3484 struct octeon_device_priv
*oct_priv
=
3485 (struct octeon_device_priv
*)octeon_dev
->priv
;
3486 atomic_set(&octeon_dev
->status
, OCT_DEV_BEGIN_STATE
);
3488 /* Enable access to the octeon device and make its DMA capability
3491 if (octeon_pci_os_setup(octeon_dev
))
3494 /* Identify the Octeon type and map the BAR address space. */
3495 if (octeon_chip_specific_setup(octeon_dev
)) {
3496 dev_err(&octeon_dev
->pci_dev
->dev
, "Chip specific setup failed\n");
3500 atomic_set(&octeon_dev
->status
, OCT_DEV_PCI_MAP_DONE
);
3502 octeon_dev
->app_mode
= CVM_DRV_INVALID_APP
;
3504 /* Do a soft reset of the Octeon device. */
3505 if (octeon_dev
->fn_list
.soft_reset(octeon_dev
))
3508 /* Initialize the dispatch mechanism used to push packets arriving on
3509 * Octeon Output queues.
3511 if (octeon_init_dispatch_list(octeon_dev
))
3514 octeon_register_dispatch_fn(octeon_dev
, OPCODE_NIC
,
3515 OPCODE_NIC_CORE_DRV_ACTIVE
,
3516 octeon_core_drv_init
,
3519 INIT_DELAYED_WORK(&octeon_dev
->nic_poll_work
.work
, nic_starter
);
3520 octeon_dev
->nic_poll_work
.ctxptr
= (void *)octeon_dev
;
3521 schedule_delayed_work(&octeon_dev
->nic_poll_work
.work
,
3522 LIQUIDIO_STARTER_POLL_INTERVAL_MS
);
3524 atomic_set(&octeon_dev
->status
, OCT_DEV_DISPATCH_INIT_DONE
);
3526 octeon_set_io_queues_off(octeon_dev
);
3528 /* Setup the data structures that manage this Octeon's Input queues. */
3529 if (octeon_setup_instr_queues(octeon_dev
)) {
3530 dev_err(&octeon_dev
->pci_dev
->dev
,
3531 "instruction queue initialization failed\n");
3532 /* On error, release any previously allocated queues */
3533 for (j
= 0; j
< octeon_dev
->num_iqs
; j
++)
3534 octeon_delete_instr_queue(octeon_dev
, j
);
3537 atomic_set(&octeon_dev
->status
, OCT_DEV_INSTR_QUEUE_INIT_DONE
);
3539 /* Initialize soft command buffer pool
3541 if (octeon_setup_sc_buffer_pool(octeon_dev
)) {
3542 dev_err(&octeon_dev
->pci_dev
->dev
, "sc buffer pool allocation failed\n");
3545 atomic_set(&octeon_dev
->status
, OCT_DEV_SC_BUFF_POOL_INIT_DONE
);
3547 /* Initialize lists to manage the requests of different types that
3548 * arrive from user & kernel applications for this octeon device.
3550 if (octeon_setup_response_list(octeon_dev
)) {
3551 dev_err(&octeon_dev
->pci_dev
->dev
, "Response list allocation failed\n");
3554 atomic_set(&octeon_dev
->status
, OCT_DEV_RESP_LIST_INIT_DONE
);
3556 if (octeon_setup_output_queues(octeon_dev
)) {
3557 dev_err(&octeon_dev
->pci_dev
->dev
, "Output queue initialization failed\n");
3558 /* Release any previously allocated queues */
3559 for (j
= 0; j
< octeon_dev
->num_oqs
; j
++)
3560 octeon_delete_droq(octeon_dev
, j
);
3563 atomic_set(&octeon_dev
->status
, OCT_DEV_DROQ_INIT_DONE
);
3565 /* The input and output queue registers were setup earlier (the queues
3566 * were not enabled). Any additional registers that need to be
3567 * programmed should be done now.
3569 ret
= octeon_dev
->fn_list
.setup_device_regs(octeon_dev
);
3571 dev_err(&octeon_dev
->pci_dev
->dev
,
3572 "Failed to configure device registers\n");
3576 /* Initialize the tasklet that handles output queue packet processing.*/
3577 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Initializing droq tasklet\n");
3578 tasklet_init(&oct_priv
->droq_tasklet
, octeon_droq_bh
,
3579 (unsigned long)octeon_dev
);
3581 /* Setup the interrupt handler and record the INT SUM register address
3583 octeon_setup_interrupt(octeon_dev
);
3585 /* Enable Octeon device interrupts */
3586 octeon_dev
->fn_list
.enable_interrupt(octeon_dev
->chip
);
3588 /* Enable the input and output queues for this Octeon device */
3589 octeon_dev
->fn_list
.enable_io_queues(octeon_dev
);
3591 atomic_set(&octeon_dev
->status
, OCT_DEV_IO_QUEUES_DONE
);
3593 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Waiting for DDR initialization...\n");
3595 if (ddr_timeout
== 0) {
3596 dev_info(&octeon_dev
->pci_dev
->dev
,
3597 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
3600 schedule_timeout_uninterruptible(HZ
* LIO_RESET_SECS
);
3602 /* Wait for the octeon to initialize DDR after the soft-reset. */
3603 ret
= octeon_wait_for_ddr_init(octeon_dev
, &ddr_timeout
);
3605 dev_err(&octeon_dev
->pci_dev
->dev
,
3606 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
3611 if (octeon_wait_for_bootloader(octeon_dev
, 1000) != 0) {
3612 dev_err(&octeon_dev
->pci_dev
->dev
, "Board not responding\n");
3616 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Initializing consoles\n");
3617 ret
= octeon_init_consoles(octeon_dev
);
3619 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not access board consoles\n");
3622 ret
= octeon_add_console(octeon_dev
, 0);
3624 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not access board console\n");
3628 atomic_set(&octeon_dev
->status
, OCT_DEV_CONSOLE_INIT_DONE
);
3630 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Loading firmware\n");
3631 ret
= load_firmware(octeon_dev
);
3633 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not load firmware to board\n");
3637 handshake
[octeon_dev
->octeon_id
].init_ok
= 1;
3638 complete(&handshake
[octeon_dev
->octeon_id
].init
);
3640 atomic_set(&octeon_dev
->status
, OCT_DEV_HOST_OK
);
3642 /* Send Credit for Octeon Output queues. Credits are always sent after
3643 * the output queue is enabled.
3645 for (j
= 0; j
< octeon_dev
->num_oqs
; j
++)
3646 writel(octeon_dev
->droq
[j
]->max_count
,
3647 octeon_dev
->droq
[j
]->pkts_credit_reg
);
3649 /* Packets can start arriving on the output queues from this point. */
3655 * \brief Exits the module
3657 static void __exit
liquidio_exit(void)
3659 liquidio_deinit_pci();
3661 pr_info("LiquidIO network module is now unloaded\n");
module_init(liquidio_init);
module_exit(liquidio_exit);