// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the      */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to     */
/* CRQs, but are used by the driver to notify the server that a packet  */
/* is ready for transmission or that a buffer has been added to receive */
/* a packet. Subsequently, sCRQs are used by the server to notify the   */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                    */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA-mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
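/* A minimal sketch (not compiled) of the long term mapping flow described
 * above, using the helpers defined later in this file, shown only to
 * illustrate the buffer lifecycle:
 */
#if 0
	struct ibmvnic_long_term_buff ltb;

	/* at init: allocate one large buffer and register it with the server */
	alloc_long_term_buff(adapter, &ltb, pool_size * buff_size);

	/* steady state: skbs are copied into slices of ltb.buff and the
	 * corresponding offsets are handed to the server via sCRQs; the
	 * buffer itself is never remapped per packet
	 */

	/* at teardown: unregister and free the single mapping */
	free_long_term_buff(adapter, &ltb);
#endif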
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
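/* Example (illustrative only): the two macros above are meant to be used
 * together, e.g. by the ethtool statistics path, to pull a named counter
 * out of the adapter by offset:
 *
 *	u64 rx_packets = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[0].offset);
 *
 * where ibmvnic_stats[0].offset was computed by IBMVNIC_STAT_OFF(rx_packets).
 */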
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
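/* Worked example for the 24-bit length handling above (illustrative): with
 * buff_size = 0x600 on a little-endian host, 0x600 << 8 = 0x60000, and
 * cpu_to_be32(0x60000) lays the bytes out as 00 06 00 00; the length then
 * occupies the first three bytes of rx_add.len, so the byte the 24-bit
 * field truncates is always the trailing zero.
 */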
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}
static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}
static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}
static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}
static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}
static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}
static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}
static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @indir_arr - descriptor array to fill, starting at the first TX descriptor
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}
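/* Worked example (illustrative): for a TCP/IPv4 frame with a 14-byte L2,
 * 20-byte L3 and 20-byte L4 header, build_hdr_data() returns 54 bytes.
 * create_hdr_descs() then emits one header descriptor carrying the first
 * 24 bytes, one extension descriptor carrying the next 29, and a second
 * extension descriptor carrying the final byte, so *num_entries grows by 3.
 */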
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
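/* Example (illustrative): with a hypothetical min_mtu of 60, a 42-byte ARP
 * request would be zero-padded to 60 bytes by skb_put_padto() before being
 * copied into the long term buffer; larger frames pass through untouched.
 */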
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}
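/* TX descriptors are not posted one at a time: they are staged in the
 * queue's indirect buffer and handed to the hypervisor in a single
 * indirect sub-CRQ call by ibmvnic_tx_scrq_flush(). A condensed sketch of
 * the pattern ibmvnic_xmit() follows (illustrative only, not the exact
 * control flow):
 */
#if 0
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS)
		ibmvnic_tx_scrq_flush(adapter, tx_scrq); /* no room: flush */
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (!netdev_xmit_more() || ind_bufp->index == IBMVNIC_MAX_IND_DESCS)
		ibmvnic_tx_scrq_flush(adapter, tx_scrq); /* push the batch */
#endif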
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
			      >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}
/*
 * do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);

	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, true);
	if (rc)
		return IBMVNIC_INIT_FAILED;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = reset_state;
		return rc;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	return 0;
}
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	rtnl_lock();
	/*
	 * Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			goto out;
		}

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
	}

	rc = 0;

out:
	rtnl_unlock();

	return rc;
}
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}
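
/* Illustrative sketch (editorial addition, not from the original source):
 * the reset worker drains the rwi list FIFO-style, one work item at a
 * time, roughly:
 *
 *	rwi = get_next_rwi(adapter);
 *	while (rwi) {
 *		... handle rwi->reset_reason ...
 *		kfree(rwi);
 *		rwi = get_next_rwi(adapter);
 *	}
 *
 * Because entries are unlinked under rwi_lock, the producer in
 * ibmvnic_reset() and this consumer never race on the list itself.
 */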
static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool saved_state = false;
	unsigned long flags;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			/* CHANGE_PARAM requestor holds rtnl_lock */
			rc = do_change_param_reset(adapter, rwi, reset_state);
		} else if (adapter->force_reset_recovery) {
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
			     adapter->from_passive_init)) {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc == IBMVNIC_OPEN_FAILED) {
			if (list_empty(&adapter->rwi_list))
				adapter->state = VNIC_CLOSED;
			else
				adapter->state = reset_state;
			rc = 0;
		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
			   !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
	}

	clear_bit_unlock(0, &adapter->resetting);
}
static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	return -ret;
}
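
/* Usage sketch (editorial addition): error paths queue a reset by reason
 * and let the deferred worker do the heavy lifting, e.g.:
 *
 *	if (fatal_firmware_error)
 *		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 *
 * A reason already present on ->rwi_list is dropped as a duplicate, so
 * calling this from several error paths for the same event is safe.
 */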
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	if (test_bit(0, &adapter->resetting)) {
		netdev_err(adapter->netdev,
			   "Adapter is resetting, skip timeout reset\n");
		return;
	}
	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	int frames_processed;
	int scrq_num;

	netdev = napi->dev;
	adapter = netdev_priv(netdev);
	scrq_num = (int)(napi - adapter->napi);
	frames_processed = 0;
	rx_scrq = adapter->rx_scrq[scrq_num];

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, rx_scrq);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, rx_scrq))
			break;
		next = ibmvnic_next_scrq(adapter, rx_scrq);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING &&
	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
	      adapter->req_rx_add_entries_per_subcrq / 2) ||
	     frames_processed < budget))
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
	if (frames_processed < budget) {
		if (napi_complete_done(napi, frames_processed)) {
			enable_scrq_irq(adapter, rx_scrq);
			if (pending_scrq(adapter, rx_scrq)) {
				if (napi_reschedule(napi)) {
					disable_scrq_irq(adapter, rx_scrq);
					goto restart_poll;
				}
			}
		}
	}
	return frames_processed;
}
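
/* NAPI pattern sketch (editorial addition): ibmvnic_poll() follows the
 * standard NAPI contract - consume at most "budget" completions, replenish
 * the rx pool, and only re-enable the queue interrupt once
 * napi_complete_done() confirms polling is finished:
 *
 *	processed = 0;
 *	while (processed < budget && pending_scrq(adapter, rx_scrq))
 *		... process one completion, processed++ ...
 *	if (processed < budget && napi_complete_done(napi, processed))
 *		enable_scrq_irq(adapter, rx_scrq);
 *
 * The re-check of pending_scrq() after enabling the irq closes the race
 * where a completion arrives between the last poll pass and irq enable.
 */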
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}
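
/* Sketch of the fallback dance above (editorial addition): the current
 * request_* values are snapshotted into ->fallback before the CHANGE_PARAM
 * reset; if that reset fails, ->desired is restored from ->fallback and a
 * second CHANGE_PARAM reset re-applies the old, known-good parameters.
 * Both waits are bounded (60000 ms) so a wedged reset cannot block the
 * ethtool caller forever.
 */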
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}
static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters can not
	 * handle packets with a MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}
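
/* Worked example (editorial addition, hypothetical numbers): for a GSO skb
 * with gso_size = 128 (< 224) the check above clears NETIF_F_GSO_MASK, so
 * the core segments the skb in software before calling ndo_start_xmit:
 *
 *	features = ibmvnic_features_check(skb, dev, dev->features);
 *	if (!(features & NETIF_F_GSO_MASK))
 *		... skb is segmented by the stack, not the adapter ...
 */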
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}
static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		channels->max_rx = adapter->max_rx_queues;
		channels->max_tx = adapter->max_tx_queues;
	} else {
		channels->max_rx = IBMVNIC_MAX_QUEUES;
		channels->max_tx = IBMVNIC_MAX_QUEUES;
	}
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}
static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_queues != channels->rx_count ||
	     adapter->req_tx_queues != channels->tx_count))
		netdev_info(netdev,
			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
				i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	default:
		return;
	}
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}
static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;
	scrq->ind_buf.index = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	if (!adapter->tx_scrq || !adapter->rx_scrq)
		return -EINVAL;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_free_coherent(dev,
			  IBMVNIC_IND_ARR_SZ,
			  scrq->ind_buf.indir_arr,
			  scrq->ind_buf.indir_dma);

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->ind_buf.index = 0;

	scrq->ind_buf.indir_arr =
		dma_alloc_coherent(dev,
				   IBMVNIC_IND_ARR_SZ,
				   &scrq->ind_buf.indir_dma,
				   GFP_KERNEL);

	if (!scrq->ind_buf.indir_arr)
		goto indir_failed;

	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

indir_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (test_bit(0, &adapter->resetting) &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI would fail with rc = H_FUNCTION when running
		 * in XIVE mode which is expected, but not an error.
		 */
		if (rc && (rc != H_FUNCTION))
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	struct netdev_queue *txq;
	union sub_crq *next;
	int index;
	int i;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;
		int total_bytes = 0;
		int num_packets = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];
			num_packets++;
			num_entries += txbuff->num_entries;
			if (txbuff->skb) {
				total_bytes += txbuff->skb->len;
				dev_consume_skb_irq(txbuff->skb);
				txbuff->skb = NULL;
			} else {
				netdev_warn(adapter->netdev,
					    "TX completion received with NULL socket buffer\n");
			}
			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
		netdev_tx_completed_queue(txq, num_packets, total_bytes);

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
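
/* Ring-consumption sketch (editorial addition): pending_scrq() peeks at the
 * valid bit of msgs[cur] while ibmvnic_next_scrq() consumes the entry and
 * wraps cur at size. A typical consumer therefore looks like:
 *
 *	while (pending_scrq(adapter, scrq)) {
 *		next = ibmvnic_next_scrq(adapter, scrq);
 *		... handle completion ...
 *		next->tx_comp.first = 0;   // mark the slot free again
 *	}
 */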
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	dma_wmb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			/* do not reset, report the fail, wait for passive init from server */
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int retries = 100;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	do {
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc != H_CLOSED)
			break;
		retries--;
		msleep(50);
	} while (retries > 0);

	if (rc) {
		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
		return rc;
	}

	return 0;
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}
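
/* Worked example (editorial addition, hypothetical values): with nodename
 * "lpar1" and netdev name "eth0" the client data needs
 *
 *	4 * sizeof(struct vnic_login_client_data)	descriptor headers
 *	+ 6	("Linux" + NUL)
 *	+ 6	("lpar1" + NUL)
 *	+ 5	("eth0"  + NUL)
 *
 * bytes on top of the fixed login buffer layout.
 */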
static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
			cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}
static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}
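
/* Sketch of the CRQ command pattern used by the senders above (editorial
 * addition; "some_command" stands in for any member of union ibmvnic_crq):
 *
 *	union ibmvnic_crq crq;
 *
 *	memset(&crq, 0, sizeof(crq));
 *	crq.some_command.first = IBMVNIC_CRQ_CMD;
 *	crq.some_command.cmd   = SOME_COMMAND;
 *	... fill command-specific fields, multi-byte ones big-endian ...
 *	ibmvnic_send_crq(adapter, &crq);
 *
 * Responses arrive asynchronously on the CRQ and are dispatched to the
 * handle_*_rsp() helpers later in this file.
 */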
static void send_query_map(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
	    cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}
static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	send_control_ip_offload(adapter);
}

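/* Map a firmware error-indication cause code to a human-readable string */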
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

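/* Handle CHANGE_MAC_ADDR_RSP: adopt the (possibly adjusted) MAC address
 * returned by the server and wake the thread waiting on fw_done.
 */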
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

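/* Handle a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * granted a different value than requested, so accept its value (or the
 * fallback MTU) and retry the exchange; once all outstanding capability
 * responses are in, move on to querying IP offload support.
 */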
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}

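/* Handle the LOGIN response: validate it against the login request, then
 * record the negotiated rx buffer size and the tx/rx sub-CRQ handles
 * assigned by the server before completing init_done.
 */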
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

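/* Handle a QUERY_CAPABILITY response: store the reported min/max/optimal
 * value on the adapter. When the last outstanding query completes, start
 * requesting capabilities with send_request_cap().
 */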
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}

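/* Ask the server for the physical port parameters (speed/duplex) and
 * wait up to 10 seconds for the response.
 */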
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

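/* Translate the QUERY_PHYS_PARMS response into ethtool speed/duplex
 * constants, defaulting to "unknown" for unrecognized values.
 */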
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

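/* Top-level CRQ dispatcher: handles initialization and transport-event
 * messages directly and fans command responses out to their handlers.
 */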
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

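/* Drain the CRQ under the queue lock, staying in the tasklet until all
 * expected capability responses have been handled.
 */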
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

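/* Free and re-register the CRQ with the hypervisor, clearing any stale
 * messages; H_CLOSED here just means the partner is not ready yet.
 */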
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

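/* Allocate and register the CRQ page with the hypervisor, then set up
 * the tasklet and interrupt that service it.
 */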
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

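/* Drive the CRQ initialization handshake with the server and (re)build
 * the sub-CRQs; on a reset that changed the queue counts the sub-CRQs
 * are released and reallocated rather than simply reset.
 */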
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset) {
		old_num_rx_queues = adapter->req_rx_queues;
		old_num_tx_queues = adapter->req_tx_queues;
		reinit_completion(&adapter->init_done);
	}

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

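/* Probe: allocate the netdev, initialize adapter state and the CRQ,
 * negotiate capabilities, and register the device.
 */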
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

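/* Remove: refuse while a reset is in flight, otherwise tear down the
 * netdev, sub-CRQs, CRQ, and statistics resources.
 */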
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (test_bit(0, &adapter->resetting)) {
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}

	adapter->state = VNIC_REMOVING;
	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

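/* sysfs "failover" attribute: writing 1 asks the hypervisor to signal a
 * session error, triggering a client-initiated failover.
 */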
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

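/* Estimate the IO entitlement (DMA window space) this adapter needs,
 * based on the CRQ, sub-CRQ, statistics, and rx pool mappings.
 */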
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

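/* PM resume: kick the tasklet in case CRQ messages arrived while the
 * partition was suspended.
 */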
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);