/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
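
/* Illustrative example of the long term mapping described above (not part of
 * the protocol definition): an rx pool with N buffers of buff_size B is
 * backed by a single long term buffer of N * B bytes, and the data for pool
 * slot i always lives at offset i * B within it, as computed in
 * replenish_rx_pool() and ibmvnic_xmit() below.
 */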

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
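
/* Worked example of the two macros above: IBMVNIC_STAT_OFF(rx_packets) is
 * the byte offset of adapter->stats.rx_packets from the start of struct
 * ibmvnic_adapter, so IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 * reads the u64 at that offset, i.e. it evaluates to
 * adapter->stats.rx_packets.
 */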

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}
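
/* pool->free_map is a ring of buffer indices: replenish_rx_pool() takes free
 * slots at next_free (marking them IBMVNIC_INVALID_MAP while they are posted
 * to firmware) and remove_buff_from_pool() returns completed slots at
 * next_alloc, so the two cursors chase each other around the ring.
 */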

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed. Firmware guarantees that a signal will
		 * be sent to the driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		/* note: index with j, not i; the buffers belong to rx_pool */
		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int tx_scrqs;
	int i, j, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
		if (rc)
			return rc;

		memset(tx_pool->tx_buff, 0,
		       adapter->req_tx_entries_per_subcrq *
		       sizeof(struct ibmvnic_tx_buff));

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		/* one req_mtu-sized slot per tx entry in the long term buffer */
		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	adapter->napi_enabled = false;
}
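
/* Login handshake: the VNIC server can answer a login by requesting that
 * capabilities be renegotiated; adapter->renegotiate is set from the CRQ
 * response handling (not shown in this section). When that happens the
 * sub-CRQs are released, capabilities are re-queried and the login is
 * retried, hence the loop in ibmvnic_login() below.
 */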

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;
	int rc;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
			rc = init_sub_crqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's failed\n");
				return -1;
			}
			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's irqs failed\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			netif_napi_del(&adapter->napi[i]);
	}
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);
	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool->tx_buff)
			continue;

		for (j = 0; j < tx_entries; j++) {
			if (tx_pool->tx_buff[j].skb) {
				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
				tx_pool->tx_buff[j].skb = NULL;
			}
		}
	}
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq)
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			int retries = 10;

			/* give in-flight completions a chance to drain */
			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
				if (--retries == 0)
					break;
				else
					msleep(100);
			}

			if (adapter->rx_scrq[i]->irq)
				disable_irq(adapter->rx_scrq[i]->irq);
		}
	}

	clean_tx_pools(adapter);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length, both of which are used
 * to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
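
/* Capacity note: the first header descriptor carries up to 24 bytes of
 * header data (alongside the per-layer length fields), while each extension
 * descriptor carries up to 29 bytes. build_hdr_descs_arr() below sizes the
 * array with a plain ceiling division by 29, which, given a first descriptor
 * that only holds 24 bytes, can in principle undercount by one descriptor
 * for some header lengths.
 */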

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		/* roll the consumer index back to reclaim the slot */
		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED) {
			/* Disable TX and report carrier off if queue is closed.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
		    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
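
/* TX flow control: ibmvnic_xmit() above increments tx_scrq->used per posted
 * frame and stops the subqueue once it reaches req_tx_entries_per_subcrq;
 * ibmvnic_complete_tx() decrements it per completion and wakes the subqueue
 * again once it drops to half of that limit.
 */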

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

/* do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		rc = reset_tx_pools(adapter);
		if (rc)
			return rc;

		rc = reset_rx_pools(adapter);
		if (rc)
			return rc;

		if (reset_state == VNIC_CLOSED)
			return 0;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
		netdev_notify_peers(netdev);

	return 0;
}
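
/* Reset work items (struct ibmvnic_rwi) form a FIFO on adapter->rwi_list,
 * protected by adapter->rwi_lock: ibmvnic_reset() enqueues an item (skipping
 * duplicates of an already-pending reason) and __ibmvnic_reset() drains the
 * list, calling do_reset() for each entry.
 */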

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (rc) {
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		adapter->init_done_rc = EAGAIN;
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_err(netdev, "Matching reset found, skipping\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	schedule_work(&adapter->ibmvnic_reset);
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
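
/* NAPI receive path: the frame is copied out of the long-term-mapped DMA
 * buffer into the skb that was preallocated for the slot at replenish time,
 * so the slot can be returned to the pool immediately. The bottom of
 * ibmvnic_poll() uses the usual re-arm pattern: re-enable the interrupt,
 * recheck for completions that raced in, and disable it again if polling
 * must resume.
 */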

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(adapter->resetting)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	return -EPROTONOSUPPORT;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu		= ibmvnic_change_mtu,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
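
/* Sizing note: __get_free_pages(..., 2) above returns an order-2 (four page)
 * block, matching the 4 * PAGE_SIZE that is DMA-mapped and registered with
 * h_reg_sub_crq(), so scrq->size ends up as the number of
 * sizeof(union sub_crq) entries that fit in those four pages.
 */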

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						  producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}
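
/* Send REQUEST_CAPABILITY CRQs for every negotiated resource. On the first
 * call (retry == 0) the requested values are derived from the server's
 * advertised min/max/opt values; on a retry the values adjusted by
 * handle_request_cap_rsp() are resent unchanged.
 */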
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
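
/* The IBMVNIC_CRQ_CMD_RSP bit in the first byte of an entry doubles as the
 * "valid" flag for ring entries, so peeking at the current cursor position
 * is enough to tell whether work is pending.
 */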
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
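
/* Hand a single sub-CRQ descriptor to the hypervisor. The memory barrier
 * before the hcall ensures the descriptor contents are globally visible
 * before the hypervisor is told about them.
 */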
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
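
/* Build and send the LOGIN request. The login buffer is laid out as a fixed
 * header followed by the tx sub-CRQ handle list and then the rx handle list;
 * the offsets stored in the header must match that layout. Both the login
 * buffer and the response buffer stay DMA-mapped until the LOGIN_RSP arrives.
 */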
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
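
/* REQUEST_MAP/REQUEST_UNMAP manage the long-term mapped DMA buffers: once a
 * buffer is registered, the server refers back to it by its u8 map_id rather
 * than by address.
 */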
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
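
/* Parse the QUERY_IP_OFFLOAD response, translate the server's checksum
 * capabilities into netdev feature flags, and reply with a
 * CONTROL_IP_OFFLOAD request enabling the offloads we will actually use.
 */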
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
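
/* A REQUEST_ERROR_RSP carries the detail bytes for an error id we asked
 * about in request_error_information() below; find the matching buffer on
 * the adapter's error list, dump it, and free it.
 */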
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
		& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
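
/* Each REQUEST_CAPABILITY response either confirms a value or, with
 * PARTIALSUCCESS, supplies the value the server can support, in which case
 * the whole set of requests is resent with retry set. Once the last
 * outstanding response arrives, move on to the IP offload query.
 */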
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
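
/* A non-zero rc on the LOGIN response means the server couldn't satisfy the
 * requested queue counts; flag renegotiation so the caller can shrink the
 * request and log in again.
 */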
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
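
/* Record one QUERY_CAPABILITY response per CRQ; running_cap_crqs tells us
 * when the last response has arrived so that the capability requests can be
 * sent (ibmvnic_send_req_caps() with retry == 0).
 */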
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
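
/* Top-level CRQ dispatcher: the first byte selects the message class (init,
 * transport event, or command response) and the second selects the specific
 * command handler.
 */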
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
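
/* Free and re-register the main CRQ with the hypervisor, e.g. after a
 * partition migration; the DMA mapping of the page is preserved, only its
 * contents and cursor are reset.
 */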
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
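
/* Allocate, map, and register the single-page main CRQ, then wire up the
 * tasklet and the CRQ interrupt. H_RESOURCE from H_REG_CRQ can mean a stale
 * registration (e.g. after kexec), which a reset clears.
 */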
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
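
/* Bring-up sequence: (re)register the CRQ, send IBMVNIC_CRQ_INIT, and wait
 * for the server to drive version exchange, capability exchange, and login
 * via the CRQ handlers; then set up the sub-CRQs and their interrupts.
 */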
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}

	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
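
/* probe: the MAC address comes from the device tree via the VETH_MAC_ADDR
 * attribute. ibmvnic_init() is retried while it returns EAGAIN, since the
 * server may not be ready the first time around.
 */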
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN) {
			free_netdev(netdev);
			return rc;
		}
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		device_remove_file(&dev->dev, &dev_attr_failover);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;
	return 0;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
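
/* Writing "1" to the failover sysfs attribute asks the hypervisor (via
 * H_VIOCTL) for the session token and then reports a session error against
 * it, triggering a client-initiated failover.
 */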
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR(failover, 0200, NULL, failover_store);
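
/* Estimate the IO entitlement this adapter wants: the CRQ page, the
 * statistics buffer, 4 pages per sub-CRQ, and the long-term mapped rx pool
 * buffers. Before probe finishes we only have the default to go on.
 */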
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	if (adapter->state != VNIC_OPEN)
		return 0;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}
static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);