/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify  */
/* it under the terms of the GNU General Public License as published by  */
/* the Free Software Foundation; either version 2 of the License, or     */
/* (at your option) any later version.                                   */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,       */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/* GNU General Public License for more details.                          */
/*                                                                        */
/* You should have received a copy of the GNU General Public License     */
/* along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA-mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
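/* A rough sketch of how that message flow maps onto the code below:     */
/*                                                                        */
/*   tx: ibmvnic_xmit() copies the skb into a long term buffer, builds a */
/*       descriptor, and hands it to the server via send_subcrq() or     */
/*       send_subcrq_indirect()                                           */
/*   rx: replenish_rx_pool() posts empty buffers on an sCRQ, and          */
/*       ibmvnic_poll() drains completed frames via pending_scrq() and    */
/*       ibmvnic_next_scrq()                                              */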
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
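/* For example, IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 * evaluates to the u64 at adapter->stats.rx_packets; the table below pairs
 * each ethtool string with such an offset, and ibmvnic_get_ethtool_stats()
 * reads the values back through this macro.
 */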
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		return -1;
	}
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed. Firmware guarantees that a signal will
		 * be sent to the driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(adapter->req_tx_queues,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(adapter->req_rx_queues,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = 0;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	return 0;
}
static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int tx_scrqs;
	int i, j, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);

		tx_pool = &adapter->tx_pool[i];

		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
		if (rc)
			return rc;

		rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
		if (rc)
			return rc;

		memset(tx_pool->tx_buff, 0,
		       adapter->req_tx_entries_per_subcrq *
		       sizeof(struct ibmvnic_tx_buff));

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
		tx_pool->tso_index = 0;
	}

	return 0;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		free_long_term_buff(adapter, &tx_pool->tso_ltb);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->num_active_tx_pools = 0;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing tx_pool[%d], %lld buffs\n",
			   i, adapter->req_tx_entries_per_subcrq);

		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		/* alloc TSO ltb */
		if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
					 IBMVNIC_TSO_BUFS *
					 IBMVNIC_TSO_BUF_SZ)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->tso_index = 0;

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	adapter->num_active_tx_pools = tx_subcrqs;

	return 0;
}
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;
	int rc;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
			rc = init_sub_crqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's failed\n");
				return -1;
			}
			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's irqs failed\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}
static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			netdev_dbg(adapter->netdev,
				   "Releasing napi[%d]\n", i);
			netif_napi_del(&adapter->napi[i]);
		}
	}
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_stats_buffers(adapter);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Adding napi[%d]\n", i);
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool->tx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		for (j = 0; j < tx_entries; j++) {
			if (tx_pool->tx_buff[j].skb) {
				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
				tx_pool->tx_buff[j].skb = NULL;
			}
		}
	}
}
static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(adapter->netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			int retries = 10;

			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
				if (retries == 0)
					break;
				retries--;
				msleep(100);
			}

			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(adapter->netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}

	clean_tx_pools(adapter);
	adapter->state = VNIC_CLOSED;
	return rc;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (skb_is_gso(skb)) {
		offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
		dst = tx_pool->tso_ltb.buff + offset;
		memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
		data_dma_addr = tx_pool->tso_ltb.addr + offset;
		tx_pool->tso_index++;
		if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
			tx_pool->tso_index = 0;
	} else {
		offset = index * adapter->req_mtu;
		dst = tx_pool->long_term_buff.buff + offset;
		memset(dst, 0, adapter->req_mtu);
		data_dma_addr = tx_pool->long_term_buff.addr + offset;
	}

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	if (skb_is_gso(skb))
		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
	else
		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED) {
			/* Disable TX and report carrier off if queue is closed.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
		    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	__ibmvnic_set_mac(netdev, addr);

	return 0;
}
/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		rc = ibmvnic_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			rc = init_rx_pools(netdev);
			if (rc)
				return rc;
			rc = init_tx_pools(netdev);
			if (rc)
				return rc;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;

			if (reset_state == VNIC_CLOSED)
				return 0;
		}
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
		netdev_notify_peers(netdev);

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}
static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		adapter->init_done_rc = EAGAIN;
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);

	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(adapter->resetting)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	init_completion(&adapter->reset_done);
	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	adapter->wait_for_reset = true;
	wait_for_completion(&adapter->reset_done);

	if (adapter->reset_done_rc) {
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		init_completion(&adapter->reset_done);
		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		wait_for_completion(&adapter->reset_done);
	}
	adapter->wait_for_reset = false;

	return adapter->reset_done_rc;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu		= ibmvnic_change_mtu,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}

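/* Note: the name ordering produced above (global stats, then three strings
 * per tx queue, then three per rx queue) must stay in lockstep with the
 * order in which ibmvnic_get_ethtool_stats() fills the data array, and the
 * total must match ibmvnic_get_sset_count(); ethtool pairs names with
 * values purely by index.
 */
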
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}

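/* The global counters are fetched synchronously from the VNIC server: a
 * REQUEST_STATISTICS CRQ is sent and this function sleeps until the
 * REQUEST_STATISTICS_RSP handler completes stats_done, at which point the
 * DMA-mapped statistics buffer (stats_token) holds fresh data. The
 * per-queue counters, by contrast, are maintained locally by the driver.
 */
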
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

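/* Each sub-CRQ message area is an order-2 (four page) allocation, which is
 * why the memset() and h_reg_sub_crq() calls here, and the
 * free_pages(..., 2) calls in the release paths below, all operate on
 * 4 * PAGE_SIZE.
 */
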
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

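/* H_RESOURCE from h_reg_sub_crq appears to indicate sub-CRQs left over
 * from a previous incarnation of the device, so the main CRQ is reset
 * before registration continues; H_CLOSED only means the server side has
 * not opened its end yet and is not treated as fatal here.
 */
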
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

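/* The re-check after enable_scrq_irq() closes the race where a completion
 * arrives between draining the queue and re-enabling the interrupt: if
 * entries are pending at that point the interrupt is disabled again and
 * the drain loop restarts, mirroring the usual NAPI re-arm pattern.
 */
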
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

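/* If fewer sub-CRQs could be registered than requested, the shortfall is
 * spread across the rx and tx queue counts (never dropping below the
 * negotiated minimums) instead of failing outright, so the device can
 * still come up in a reduced configuration.
 */
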
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

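/* Capability negotiation is asynchronous: each REQUEST_CAPABILITY CRQ sent
 * above bumps running_cap_crqs, and handle_request_cap_rsp() decrements it
 * as responses arrive, kicking off the IP offload query once the counter
 * drains back to zero.
 */
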
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name;
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(&vlcd->name, adapter->netdev->name, len);
}

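/* Each entry is a type/len header followed by an inline NUL-terminated
 * string, packed back to back. The bytes written here must add up to
 * exactly the size computed by vnic_client_data_len() above: four headers
 * plus "Linux", the LPAR nodename and the netdev name with their
 * terminators, the fourth entry being the null terminating entry.
 */
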
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
			cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

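/* Rough layout of the login buffer built above:
 *
 *	struct ibmvnic_login_buffer		(fixed header)
 *	u64 tx sub-CRQ handles			[req_tx_queues]
 *	u64 rx sub-CRQ handles			[req_rx_queues]
 *	vnic login client data			(client_data_len bytes)
 *
 * The off_* and client_data_offset fields in the header are byte offsets
 * from the start of the buffer, which is why the list pointers are derived
 * with the same sizeof() arithmetic used to size the allocation.
 */
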
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

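/* Every query sent here is answered with a QUERY_CAPABILITY_RSP handled by
 * handle_query_cap_rsp(); the same running_cap_crqs counter gates the
 * transition to ibmvnic_send_req_caps() once all answers are in.
 */
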
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL, *ptr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "No FW level provided by VPD\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		ptr = strncpy((char *)adapter->fw_version,
			      substr + 3, fw_level_len);

		if (!ptr)
			dev_err(dev, "Failed to isolate FW level string\n");
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	complete(&adapter->fw_done);
}

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->features |= NETIF_F_TSO6;

	adapter->netdev->hw_features |= adapter->netdev->features;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

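/* The netdev feature flags derived above advertise only what the server
 * reported in the query buffer; the CONTROL_IP_OFFLOAD command sent at the
 * end then tells the server which of those offloads the driver actually
 * intends to use.
 */
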
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

3929 static void ibmvnic_handle_crq(union ibmvnic_crq
*crq
,
3930 struct ibmvnic_adapter
*adapter
)
3932 struct ibmvnic_generic_crq
*gen_crq
= &crq
->generic
;
3933 struct net_device
*netdev
= adapter
->netdev
;
3934 struct device
*dev
= &adapter
->vdev
->dev
;
3935 u64
*u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
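
/* ibmvnic_tasklet - bottom half for CRQ processing.
 * Drains every valid message off the CRQ under the queue lock and hands
 * each one to ibmvnic_handle_crq(). If capability requests are still
 * outstanding when the queue is empty, wait_capability is set so the
 * next run keeps draining until all capability responses have arrived.
 */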
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
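
/* ibmvnic_reenable_crq_queue - ask the hypervisor to re-enable the CRQ,
 * retrying H_ENABLE_CRQ for as long as firmware reports it is busy.
 */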
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
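
/* ibmvnic_reset_crq - close, clear, and re-register the CRQ with the
 * hypervisor, reusing the already DMA-mapped message page. H_CLOSED from
 * H_REG_CRQ just means the partner side is not ready yet.
 */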
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
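
/* release_crq_queue - tear down the CRQ: free the irq, kill the tasklet,
 * free the queue with the hypervisor, then unmap and free the message page.
 */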
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
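
/* init_crq_queue - allocate and DMA-map one page of CRQ messages, register
 * it with the hypervisor via H_REG_CRQ, and wire up the tasklet and irq.
 * H_RESOURCE can mean a stale registration (e.g. after a kexec), in which
 * case the queue is reset and registration retried.
 */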
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
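
/* ibmvnic_init - bring the CRQ up (or reset it when re-initializing during
 * a reset), send the driver's init request and wait up to 30 seconds for
 * the partner to answer, then set up the sub-CRQs and their irqs.
 */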
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting && !adapter->wait_for_reset) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}

	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
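
/* ibmvnic_probe - VIO bus probe routine. Reads the MAC address from the
 * device tree, allocates the net_device and adapter state, runs the CRQ
 * initialization handshake (retrying while the partner reports EAGAIN),
 * and finally registers the netdev and the failover sysfs attribute.
 */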
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_init_fail;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_init_fail:
	release_sub_crqs(adapter);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
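
/* ibmvnic_remove - VIO bus remove routine; unregisters the netdev and
 * releases all driver resources under the reset lock so a concurrent
 * reset cannot race with the teardown.
 */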
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
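
/* failover_store - handler for the write-only "failover" sysfs attribute.
 * Writing "1" fetches the session token via H_VIOCTL/H_GET_SESSION_TOKEN
 * and then signals H_SESSION_ERR_DETECTED so the hypervisor fails the
 * session over to the backup device.
 *
 * Illustrative usage (the exact sysfs path depends on the device's unit
 * address):
 *
 *	echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 */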
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR(failover, 0200, NULL, failover_store);
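
/* ibmvnic_get_desired_dma - report to the VIO layer how much IO memory
 * the driver wants mapped: the CRQ page, the statistics buffer, four
 * pages per sub-CRQ, and every long-term-mapped rx pool buffer.
 */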
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
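
/* ibmvnic_resume - power-management resume hook; if the adapter was open,
 * kick the tasklet once in case CRQ messages arrived while suspended.
 */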
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
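
/* VIO devices of type "network"/"IBM,vnic" bind to this driver. */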
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);