/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
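/* For example (illustrative note, not part of the original source): with a
 * receive pool of 'size' buffers of 'buff_size' bytes each, a single long
 * term buffer of size * buff_size bytes is mapped once at init time, and
 * buffer 'index' lives at a fixed offset within it, as replenish_rx_pool()
 * and ibmvnic_xmit() below compute:
 *
 *	offset   = index * pool->buff_size;
 *	dst      = pool->long_term_buff.buff + offset;	(CPU address)
 *	dma_addr = pool->long_term_buff.addr + offset;	(DMA address)
 */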
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
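/* Usage example (illustration only): the two macros above compose to read
 * a counter out of the adapter-embedded statistics block by name:
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * which is exactly how ibmvnic_get_ethtool_stats() walks ibmvnic_stats[].
 */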
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
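/* Illustrative note (not in the original source): on success the hypervisor
 * returns the new sub-CRQ's number and interrupt source in retbuf[0] and
 * retbuf[1], which callers capture directly, e.g. in init_sub_crq_queue():
 *
 *	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
 *			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
 */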
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
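/* Usage sketch (illustration only): long term buffers are allocated and
 * released in matched pairs by the pool setup/teardown paths, e.g.:
 *
 *	alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
 *			     rx_pool->size * rx_pool->buff_size);
 *	...
 *	free_long_term_buff(adapter, &rx_pool->long_term_buff);
 */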
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
			       off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
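/* Worked example of the 24-bit length encoding above (illustrative): for a
 * 2KB buffer, buff_size = 0x800 and 0x800 << 8 = 0x80000, so cpu_to_be32()
 * produces the bytes 00 08 00 00 and the 24-bit len field reads 0x000800
 * (2048) instead of a value with its low byte truncated.
 */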
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	if (adapter->logical_link_state == link_state) {
		netdev_dbg(netdev, "Link state already %d\n", link_state);
		return 0;
	}

	netdev_err(netdev, "setting link state %d\n", link_state);
	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int rc = 0;
	int i;

	if (adapter->is_closed) {
		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;
	}

	rc = ibmvnic_login(netdev);
	if (rc)
		return rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto ibmvnic_open_fail;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	rc = init_tx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc)
		goto ibmvnic_open_fail;

	netif_tx_start_all_queues(netdev);
	adapter->is_closed = false;

	return 0;

ibmvnic_open_fail:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
	release_resources(adapter);
	return -ENOMEM;
}
static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				disable_irq(adapter->rx_scrq[i]->irq);
	}
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->closing = true;
	disable_sub_crqs(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);

	release_resources(adapter);

	adapter->is_closed = true;
	adapter->closing = false;
	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
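/* Worked example (illustrative): a TCP/IPv4 frame carries 14 + 20 + 20 =
 * 54 bytes of L2/L3/L4 headers.  create_hdr_descs() packs the first 24
 * bytes into the header descriptor, and the remaining 30 bytes need two
 * extension descriptors (29 + 1), matching the arithmetic above:
 * len = 54 - 24 = 30, and 30 % 29 != 0, so *num_entries grows by
 * 30 / 29 + 1 = 2.
 */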
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED)
			netif_stop_subqueue(netdev, queue_num);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
		    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
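/* Note on the free_map ring (illustration, not in the original source):
 * ibmvnic_xmit() pops free buffer indices at consumer_index while
 * ibmvnic_complete_tx() pushes completed indices back at producer_index,
 * both advancing modulo req_tx_entries_per_subcrq:
 *
 *	index = tx_pool->free_map[tx_pool->consumer_index];
 *	tx_pool->consumer_index = (tx_pool->consumer_index + 1) %
 *				  adapter->req_tx_entries_per_subcrq;
 */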
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
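/* Polling note (illustrative): if a completion lands between the last
 * pending_scrq() check and napi_complete_done(), the re-check plus
 * napi_reschedule() above reclaims the poll context and jumps back to
 * restart_poll, so no frame is stranded with the interrupt re-enabled.
 */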
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
*adapter
,
1522 struct ibmvnic_sub_crq_queue
*scrq
)
1524 struct device
*dev
= &adapter
->vdev
->dev
;
1525 struct ibmvnic_tx_buff
*txbuff
;
1526 union sub_crq
*next
;
1532 while (pending_scrq(adapter
, scrq
)) {
1533 unsigned int pool
= scrq
->pool_index
;
1535 next
= ibmvnic_next_scrq(adapter
, scrq
);
1536 for (i
= 0; i
< next
->tx_comp
.num_comps
; i
++) {
1537 if (next
->tx_comp
.rcs
[i
]) {
1538 dev_err(dev
, "tx error %x\n",
1539 next
->tx_comp
.rcs
[i
]);
1542 index
= be32_to_cpu(next
->tx_comp
.correlators
[i
]);
1543 txbuff
= &adapter
->tx_pool
[pool
].tx_buff
[index
];
1545 for (j
= 0; j
< IBMVNIC_MAX_FRAGS_PER_CRQ
; j
++) {
1546 if (!txbuff
->data_dma
[j
])
1549 txbuff
->data_dma
[j
] = 0;
1551 /* if sub_crq was sent indirectly */
1552 first
= txbuff
->indir_arr
[0].generic
.first
;
1553 if (first
== IBMVNIC_CRQ_CMD
) {
1554 dma_unmap_single(dev
, txbuff
->indir_dma
,
1555 sizeof(txbuff
->indir_arr
),
1559 if (txbuff
->last_frag
) {
1560 if (atomic_sub_return(next
->tx_comp
.num_comps
,
1562 (adapter
->req_tx_entries_per_subcrq
/ 2) &&
1563 netif_subqueue_stopped(adapter
->netdev
,
1565 netif_wake_subqueue(adapter
->netdev
,
1567 netdev_dbg(adapter
->netdev
,
1568 "Started queue %d\n",
1572 dev_kfree_skb_any(txbuff
->skb
);
1575 adapter
->tx_pool
[pool
].free_map
[adapter
->tx_pool
[pool
].
1576 producer_index
] = index
;
1577 adapter
->tx_pool
[pool
].producer_index
=
1578 (adapter
->tx_pool
[pool
].producer_index
+ 1) %
1579 adapter
->req_tx_entries_per_subcrq
;
1581 /* remove tx_comp scrq*/
1582 next
->tx_comp
.first
= 0;
1585 enable_scrq_irq(adapter
, scrq
);
1587 if (pending_scrq(adapter
, scrq
)) {
1588 disable_scrq_irq(adapter
, scrq
);
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
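
/* Ask the server to associate a previously DMA-mapped long term buffer
 * with the given map_id; the server acknowledges with REQUEST_MAP_RSP
 * (see handle_request_map_rsp() below).
 */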
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
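
/* Capability negotiation bookkeeping: every query below increments
 * running_cap_crqs before its CRQ goes out, and handle_query_cap_rsp()
 * decrements it; sub-CRQ initialization only starts once the counter
 * returns to zero.
 */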
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
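
/* QUERY_IP_OFFLOAD_RSP arrived: the server has filled ip_offload_buf.
 * Log the advertised checksum and large-send capabilities, translate
 * them into netdev feature flags, and push our chosen subset back to
 * the server with a CONTROL_IP_OFFLOAD request.
 */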
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
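
/* REQUEST_ERROR_RSP: the server has written the detailed error data
 * into the buffer registered in request_error_information().  Find the
 * matching entry on adapter->errors by error_id, dump its contents,
 * then unmap and free it.
 */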
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
		"FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
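
/* A REQUEST_CAPABILITY response.  PARTIALSUCCESS means the server
 * granted less than was asked for; in that case the sub-CRQs are
 * released, the request value is lowered to the server's number, and
 * init_sub_crqs() retries the negotiation.
 */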
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
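
/* LOGIN_RSP: both login buffers can be unmapped now.  A non-zero return
 * code means the server couldn't honor the requested queue counts, so
 * renegotiation is flagged before init_done completes.
 */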
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		init_sub_crqs(adapter, 0);
		/* We're done querying the capabilities, initialize sub-crqs */
	}
}
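
/* Transport event worker: after a partition migration the CRQ must be
 * re-enabled and the init handshake restarted from scratch.
 */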
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}
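
/* Top-level CRQ dispatcher, called from the tasklet for every message
 * pulled off the queue: the first byte selects init/transport/command
 * handling, the second selects the specific command response.
 */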
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
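
/* H_ENABLE_CRQ may be answered with busy indications while firmware
 * works on the request, so spin until a definitive return code comes
 * back.
 */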
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
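
/* Allocate, DMA-map and register the one-page CRQ, then wire up the
 * tasklet and the CRQ interrupt.  H_RESOURCE from H_REG_CRQ usually
 * means a previous owner (e.g. after a kexec) still holds the CRQ, so a
 * reset is attempted before giving up.
 */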
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
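
/* Worker scheduled when the partner (re)initializes the connection,
 * e.g. after a failover: redo the version exchange and either re-open
 * the restarted interface or register the netdev for the first time.
 */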
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}

	return 0;
}
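
/* vio bus probe: fetch the MAC address from the device-tree attribute,
 * allocate the netdev, and run the CRQ init handshake before
 * registering with the network stack.
 */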
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
*dev
)
3430 struct net_device
*netdev
= dev_get_drvdata(dev
);
3431 struct ibmvnic_adapter
*adapter
= netdev_priv(netdev
);
3434 /* kick the interrupt handlers just in case we lost an interrupt */
3435 for (i
= 0; i
< adapter
->req_rx_queues
; i
++)
3436 ibmvnic_interrupt_rx(adapter
->rx_scrq
[i
]->irq
,
3437 adapter
->rx_scrq
[i
]);
static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};
/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);