/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/*  GNU General Public License for more details.                         */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                             */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, continuous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
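
/* A rough sketch of the long term mapping scheme described above, as it is
 * realized by the helpers in this file (alloc_long_term_buff() and
 * free_long_term_buff() below):
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, size);
 *		// dma_alloc_coherent() + REQUEST_MAP CRQ to the server
 *	...	// ltb.buff is then reused for every skb; no per-packet
 *	...	// dma_map/dma_unmap is performed
 *	free_long_term_buff(adapter, &ltb);
 *		// REQUEST_UNMAP CRQ + dma_free_coherent()
 */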

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
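
/* The two macros above are used together in ibmvnic_get_ethtool_stats()
 * below; for example
 *	data[i] = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 * reads adapter->stats.rx_packets as a u64 through its byte offset.
 */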

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	/* On success the hypervisor returns the new queue number and the
	 * hardware interrupt source in the first two return buffer slots.
	 */
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* The mapping is torn down on the server side across a failover or
	 * partition migration, so only send an explicit unmap request for
	 * the other reset paths.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
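		/* Worked example, assuming a little-endian host and a 4KB
		 * buffer: buff_size = 0x1000, so cpu_to_be32(0x1000 << 8)
		 * stores the bytes 00 10 00 00, and the 24-bit length field
		 * picks up the leading 00 10 00 intact.
		 */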

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed. Firmware guarantees that a signal will
		 * be sent to the driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int tx_scrqs;
	int i, j, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
		if (rc)
			return rc;

		memset(tx_pool->tx_buff, 0,
		       adapter->req_tx_entries_per_subcrq *
		       sizeof(struct ibmvnic_tx_buff));

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	adapter->napi_enabled = false;
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			netif_napi_del(&adapter->napi[i]);
	}
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_err(netdev, "setting link state %d\n", link_state);
	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool)
			continue;

		for (j = 0; j < tx_entries; j++) {
			if (tx_pool->tx_buff[j].skb) {
				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
				tx_pool->tx_buff[j].skb = NULL;
			}
		}
	}
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq)
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			int retries = 10;

			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
				if (retries-- == 0)
					break;
				msleep(100);
			}

			if (adapter->rx_scrq[i]->irq)
				disable_irq(adapter->rx_scrq[i]->irq);
		}
	}

	clean_tx_pools(adapter);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len = hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
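
/* hdr_field bit usage above: bit 6 selects the L2 header, bit 5 the L3
 * header and bit 4 the L4 header.  A minimal sketch for a TCP/IPv4 skb
 * with no IP options (values here are illustrative):
 *
 *	int hdr_len[3];
 *	u8 hdr_data[120];
 *	int len = build_hdr_data(0x70, skb, hdr_len, hdr_data);
 *	// hdr_len == {14, 20, tcp_hdrlen(skb)}, len == their sum
 */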

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
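
/* Capacity math for the loop above: the first descriptor (hdr) carries up
 * to 24 bytes of header data and each extension descriptor (hdr_ext) up
 * to 29 bytes.  A 54-byte Ethernet+IPv4+TCP header block is therefore
 * split 24 + 29 + 1 across one hdr and two hdr_ext descriptors, all
 * written starting at scrq_arr (indir_arr + 1 in the caller below).
 */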

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED) {
			/* Disable TX and report carrier off if queue is closed.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
		    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		rc = reset_tx_pools(adapter);
		if (rc)
			return rc;

		rc = reset_rx_pools(adapter);
		if (rc)
			return rc;

		if (reset_state == VNIC_CLOSED)
			return 0;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
		netdev_notify_peers(netdev);

	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (rc) {
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		adapter->init_done_rc = EAGAIN;
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_err(netdev, "Matching reset found, skipping\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	schedule_work(&adapter->ibmvnic_reset);
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
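
/* pool->free_map acts as a ring shared by the two ends of an rx pool:
 * replenish_rx_pool() pops indices at pool->next_free (marking the slot
 * IBMVNIC_INVALID_MAP while firmware owns the buffer), and
 * remove_buff_from_pool() above pushes completed indices back at
 * pool->next_alloc.
 */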

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(adapter->resetting)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
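
/* The enable/re-check sequence at the end of ibmvnic_poll() closes the
 * race where a completion arrives between the final pending_scrq() check
 * and enable_scrq_irq(): the interrupt is re-enabled first, the queue is
 * checked once more, and if work snuck in the irq is disabled again and
 * polling restarts.
 */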

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	return -EOPNOTSUPP;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu		= ibmvnic_change_mtu,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
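
/* Interrupt strategy: TX completions are drained directly in hard irq
 * context via ibmvnic_complete_tx(), while the RX handler only schedules
 * the per-queue NAPI context and defers all work to ibmvnic_poll().
 */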

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}
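/* Request the driver's share of each capability from the VNIC server. On
 * the first pass (retry == 0), clamp the requested ring sizes so that a
 * full ring of 32-byte sub-CRQ entries fits in the four pages allocated
 * per queue.
 */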
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
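/* Return nonzero if the descriptor at the current position of the sub-CRQ
 * has been posted by the server and is ready to be processed.
 */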
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
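/* Hand a single 32-byte sub-CRQ descriptor to the hypervisor via the
 * H_SEND_SUB_CRQ hcall; the four u64 words are passed as hcall arguments.
 */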
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
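/* Build and send the LOGIN request: the login buffer carries the negotiated
 * queue counts and the sub-CRQ handles, and points at a DMA-mapped response
 * buffer that the server fills in before raising LOGIN_RSP.
 */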
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
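/* Parse the server's checksum/offload capability buffer, set the netdev
 * feature flags the driver can honor, and reply with a CONTROL_IP_OFFLOAD
 * CRQ enabling the selected offloads.
 */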
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
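/* Process the server's response to our LOGIN request. A nonzero return code
 * means the queue counts must be renegotiated; otherwise sanity-check the
 * response against what was requested before declaring init done.
 */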
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
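/* Top-level CRQ dispatcher: initialization handshakes and transport events
 * are handled inline; command responses are routed to their handlers.
 */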
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);

	return IRQ_HANDLED;
}
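/* Drain the CRQ from tasklet context, handling each message in order. */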
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
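/* Driver initialization: bring up (or, on reset, re-register) the CRQ,
 * perform the init handshake with the VNIC server, then create the
 * sub-CRQs and their interrupts.
 */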
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}

	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN) {
			free_netdev(netdev);
			return rc;
		}
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		device_remove_file(&dev->dev, &dev_attr_failover);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;
	return 0;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
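/* sysfs hook: writing "1" to the failover attribute fetches the session
 * token and signals the hypervisor to fail the connection over to the
 * backup backing device.
 */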
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR(failover, 0200, NULL, failover_store);
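/* Report the IO entitlement this device wants: the CRQ page, the statistics
 * buffer, four pages per sub-CRQ, and the rx buffer pools.
 */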
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	if (adapter->state != VNIC_OPEN)
		return 0;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}
static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);