]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/ibm/ibmvnic.c
ibmvnic: complete init_done on transport events
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / ibm / ibmvnic.c
CommitLineData
d5bb994b 1// SPDX-License-Identifier: GPL-2.0-or-later
032c5e82
TF
2/**************************************************************************/
3/* */
4/* IBM System i and System p Virtual NIC Device Driver */
5/* Copyright (C) 2014 IBM Corp. */
6/* Santiago Leon (santi_leon@yahoo.com) */
7/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8/* John Allen (jallen@linux.vnet.ibm.com) */
9/* */
032c5e82
TF
10/* */
11/* This module contains the implementation of a virtual ethernet device */
12/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13/* option of the RS/6000 Platform Architecture to interface with virtual */
14/* ethernet NICs that are presented to the partition by the hypervisor. */
15/* */
16/* Messages are passed between the VNIC driver and the VNIC server using */
17/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18/* issue and receive commands that initiate communication with the server */
19/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20/* are used by the driver to notify the server that a packet is */
21/* ready for transmission or that a buffer has been added to receive a */
22/* packet. Subsequently, sCRQs are used by the server to notify the */
23/* driver that a packet transmission has been completed or that a packet */
24/* has been received and placed in a waiting buffer. */
25/* */
26/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27/* which skbs are DMA mapped and immediately unmapped when the transmit */
28/* or receive has been completed, the VNIC driver is required to use */
29/* "long term mapping". This entails that large, continuous DMA mapped */
30/* buffers are allocated on driver initialization and these buffers are */
31/* then continuously reused to pass skbs to and from the VNIC server. */
32/* */
33/**************************************************************************/
34
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/types.h>
38#include <linux/errno.h>
39#include <linux/completion.h>
40#include <linux/ioport.h>
41#include <linux/dma-mapping.h>
42#include <linux/kernel.h>
43#include <linux/netdevice.h>
44#include <linux/etherdevice.h>
45#include <linux/skbuff.h>
46#include <linux/init.h>
47#include <linux/delay.h>
48#include <linux/mm.h>
49#include <linux/ethtool.h>
50#include <linux/proc_fs.h>
4eb50ceb 51#include <linux/if_arp.h>
032c5e82
TF
52#include <linux/in.h>
53#include <linux/ip.h>
ad7775dc 54#include <linux/ipv6.h>
032c5e82
TF
55#include <linux/irq.h>
56#include <linux/kthread.h>
57#include <linux/seq_file.h>
032c5e82
TF
58#include <linux/interrupt.h>
59#include <net/net_namespace.h>
60#include <asm/hvcall.h>
61#include <linux/atomic.h>
62#include <asm/vio.h>
63#include <asm/iommu.h>
64#include <linux/uaccess.h>
65#include <asm/firmware.h>
65dc6891 66#include <linux/workqueue.h>
6052d5e2 67#include <linux/if_vlan.h>
37798d02 68#include <linux/utsname.h>
032c5e82
TF
69
70#include "ibmvnic.h"
71
72static const char ibmvnic_driver_name[] = "ibmvnic";
73static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
74
78b07ac1 75MODULE_AUTHOR("Santiago Leon");
032c5e82
TF
76MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77MODULE_LICENSE("GPL");
78MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
79
80static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
d7c0ef36 81static void release_sub_crqs(struct ibmvnic_adapter *, bool);
032c5e82
TF
82static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
83static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
84static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
85static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
ad7775dc 86static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
032c5e82
TF
87static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
88static int enable_scrq_irq(struct ibmvnic_adapter *,
89 struct ibmvnic_sub_crq_queue *);
90static int disable_scrq_irq(struct ibmvnic_adapter *,
91 struct ibmvnic_sub_crq_queue *);
92static int pending_scrq(struct ibmvnic_adapter *,
93 struct ibmvnic_sub_crq_queue *);
94static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
95 struct ibmvnic_sub_crq_queue *);
96static int ibmvnic_poll(struct napi_struct *napi, int data);
69980d02 97static void send_query_map(struct ibmvnic_adapter *adapter);
673ead24 98static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
9c4eaabd 99static int send_request_unmap(struct ibmvnic_adapter *, u8);
20a8ab74 100static int send_login(struct ibmvnic_adapter *adapter);
491099ad 101static void send_query_cap(struct ibmvnic_adapter *adapter);
4d96f12a 102static int init_sub_crqs(struct ibmvnic_adapter *);
bd0b6723 103static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
635e442f 104static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
f992887c 105static void release_crq_queue(struct ibmvnic_adapter *);
62740e97 106static int __ibmvnic_set_mac(struct net_device *, u8 *);
30f79625 107static int init_crq_queue(struct ibmvnic_adapter *adapter);
f8d6ae0d 108static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
65d6470d
SB
109static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
110 struct ibmvnic_sub_crq_queue *tx_scrq);
8b41d367 111static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
032c5e82
TF
112
113struct ibmvnic_stat {
114 char name[ETH_GSTRING_LEN];
115 int offset;
116};
117
118#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 offsetof(struct ibmvnic_statistics, stat))
91dc5d25 120#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
032c5e82
TF
121
122static const struct ibmvnic_stat ibmvnic_stats[] = {
123 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
145};
146
53f8b1b2
CF
147static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
148{
149 union ibmvnic_crq crq;
150
151 memset(&crq, 0, sizeof(crq));
152 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
153 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
154
155 return ibmvnic_send_crq(adapter, &crq);
156}
157
158static int send_version_xchg(struct ibmvnic_adapter *adapter)
159{
160 union ibmvnic_crq crq;
161
162 memset(&crq, 0, sizeof(crq));
163 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
164 crq.version_exchange.cmd = VERSION_EXCHANGE;
165 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
166
167 return ibmvnic_send_crq(adapter, &crq);
168}
169
032c5e82
TF
170static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
171 unsigned long length, unsigned long *number,
172 unsigned long *irq)
173{
174 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
175 long rc;
176
177 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
178 *number = retbuf[0];
179 *irq = retbuf[1];
180
181 return rc;
182}
183
476d96ca
TF
184/**
185 * ibmvnic_wait_for_completion - Check device state and wait for completion
186 * @adapter: private device data
187 * @comp_done: completion structure to wait for
188 * @timeout: time to wait in milliseconds
189 *
190 * Wait for a completion signal or until the timeout limit is reached
191 * while checking that the device is still active.
192 */
193static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
194 struct completion *comp_done,
195 unsigned long timeout)
196{
197 struct net_device *netdev;
198 unsigned long div_timeout;
199 u8 retry;
200
201 netdev = adapter->netdev;
202 retry = 5;
203 div_timeout = msecs_to_jiffies(timeout / retry);
204 while (true) {
205 if (!adapter->crq.active) {
206 netdev_err(netdev, "Device down!\n");
207 return -ENODEV;
208 }
8f9cc1ee 209 if (!retry--)
476d96ca
TF
210 break;
211 if (wait_for_completion_timeout(comp_done, div_timeout))
212 return 0;
213 }
214 netdev_err(netdev, "Operation timed out.\n");
215 return -ETIMEDOUT;
216}
217
032c5e82
TF
218static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
219 struct ibmvnic_long_term_buff *ltb, int size)
220{
221 struct device *dev = &adapter->vdev->dev;
9c4eaabd 222 int rc;
032c5e82
TF
223
224 ltb->size = size;
225 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
226 GFP_KERNEL);
227
228 if (!ltb->buff) {
229 dev_err(dev, "Couldn't alloc long term buffer\n");
230 return -ENOMEM;
231 }
232 ltb->map_id = adapter->map_id;
233 adapter->map_id++;
db5d0b59 234
ff25dcb9
TF
235 mutex_lock(&adapter->fw_lock);
236 adapter->fw_done_rc = 0;
070eca95 237 reinit_completion(&adapter->fw_done);
552a3372
SB
238
239 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
9c4eaabd 240 if (rc) {
552a3372
SB
241 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
242 goto out;
9c4eaabd 243 }
476d96ca
TF
244
245 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
246 if (rc) {
247 dev_err(dev,
248 "Long term map request aborted or timed out,rc = %d\n",
249 rc);
552a3372 250 goto out;
476d96ca 251 }
f3be0cbc
TF
252
253 if (adapter->fw_done_rc) {
254 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
255 adapter->fw_done_rc);
552a3372
SB
256 rc = -1;
257 goto out;
258 }
259 rc = 0;
260out:
261 if (rc) {
4cf2ddf3 262 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
552a3372 263 ltb->buff = NULL;
f3be0cbc 264 }
ff25dcb9 265 mutex_unlock(&adapter->fw_lock);
552a3372 266 return rc;
032c5e82
TF
267}
268
269static void free_long_term_buff(struct ibmvnic_adapter *adapter,
270 struct ibmvnic_long_term_buff *ltb)
271{
272 struct device *dev = &adapter->vdev->dev;
273
c657e32c
NF
274 if (!ltb->buff)
275 return;
276
7d3a7b9e
LP
277 /* VIOS automatically unmaps the long term buffer at remote
278 * end for the following resets:
279 * FAILOVER, MOBILITY, TIMEOUT.
280 */
ed651a10 281 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
7d3a7b9e
LP
282 adapter->reset_reason != VNIC_RESET_MOBILITY &&
283 adapter->reset_reason != VNIC_RESET_TIMEOUT)
dfad09a6 284 send_request_unmap(adapter, ltb->map_id);
59af56c2 285 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
552a3372
SB
286 ltb->buff = NULL;
287 ltb->map_id = 0;
032c5e82
TF
288}
289
0ec13aff
SB
290static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
291 struct ibmvnic_long_term_buff *ltb)
f3be0cbc 292{
0ec13aff
SB
293 struct device *dev = &adapter->vdev->dev;
294 int rc;
9c4eaabd 295
f3be0cbc 296 memset(ltb->buff, 0, ltb->size);
0ec13aff
SB
297
298 mutex_lock(&adapter->fw_lock);
299 adapter->fw_done_rc = 0;
300
301 reinit_completion(&adapter->fw_done);
302 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
303 if (rc) {
304 mutex_unlock(&adapter->fw_lock);
305 return rc;
306 }
307
308 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
309 if (rc) {
310 dev_info(dev,
311 "Reset failed, long term map request timed out or aborted\n");
312 mutex_unlock(&adapter->fw_lock);
313 return rc;
314 }
315
316 if (adapter->fw_done_rc) {
317 dev_info(dev,
318 "Reset failed, attempting to free and reallocate buffer\n");
319 free_long_term_buff(adapter, ltb);
320 mutex_unlock(&adapter->fw_lock);
321 return alloc_long_term_buff(adapter, ltb, ltb->size);
322 }
323 mutex_unlock(&adapter->fw_lock);
f3be0cbc
TF
324 return 0;
325}
326
f185a49a
TF
327static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
328{
329 int i;
330
507ebe64 331 for (i = 0; i < adapter->num_active_rx_pools; i++)
f185a49a
TF
332 adapter->rx_pool[i].active = 0;
333}
334
032c5e82
TF
335static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
336 struct ibmvnic_rx_pool *pool)
337{
338 int count = pool->size - atomic_read(&pool->available);
f3ae59c0 339 u64 handle = adapter->rx_scrq[pool->index]->handle;
032c5e82 340 struct device *dev = &adapter->vdev->dev;
4f0b6812
TF
341 struct ibmvnic_ind_xmit_queue *ind_bufp;
342 struct ibmvnic_sub_crq_queue *rx_scrq;
343 union sub_crq *sub_crq;
032c5e82
TF
344 int buffers_added = 0;
345 unsigned long lpar_rc;
032c5e82
TF
346 struct sk_buff *skb;
347 unsigned int offset;
348 dma_addr_t dma_addr;
349 unsigned char *dst;
032c5e82
TF
350 int shift = 0;
351 int index;
352 int i;
353
f185a49a
TF
354 if (!pool->active)
355 return;
356
4f0b6812
TF
357 rx_scrq = adapter->rx_scrq[pool->index];
358 ind_bufp = &rx_scrq->ind_buf;
72368f8b
SB
359
360 /* netdev_skb_alloc() could have failed after we saved a few skbs
361 * in the indir_buf and we would not have sent them to VIOS yet.
362 * To account for them, start the loop at ind_bufp->index rather
363 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
364 * be 0.
365 */
366 for (i = ind_bufp->index; i < count; ++i) {
e552aa31 367 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
032c5e82
TF
368 if (!skb) {
369 dev_err(dev, "Couldn't replenish rx buff\n");
370 adapter->replenish_no_mem++;
371 break;
372 }
373
374 index = pool->free_map[pool->next_free];
375
376 if (pool->rx_buff[index].skb)
377 dev_err(dev, "Inconsistent free_map!\n");
378
379 /* Copy the skb to the long term mapped DMA buffer */
380 offset = index * pool->buff_size;
381 dst = pool->long_term_buff.buff + offset;
382 memset(dst, 0, pool->buff_size);
383 dma_addr = pool->long_term_buff.addr + offset;
384 pool->rx_buff[index].data = dst;
385
386 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
387 pool->rx_buff[index].dma = dma_addr;
388 pool->rx_buff[index].skb = skb;
389 pool->rx_buff[index].pool_index = pool->index;
390 pool->rx_buff[index].size = pool->buff_size;
391
4f0b6812
TF
392 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
393 memset(sub_crq, 0, sizeof(*sub_crq));
394 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
395 sub_crq->rx_add.correlator =
032c5e82 396 cpu_to_be64((u64)&pool->rx_buff[index]);
4f0b6812
TF
397 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
398 sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
032c5e82
TF
399
400 /* The length field of the sCRQ is defined to be 24 bits so the
401 * buffer size needs to be left shifted by a byte before it is
402 * converted to big endian to prevent the last byte from being
403 * truncated.
404 */
405#ifdef __LITTLE_ENDIAN__
406 shift = 8;
407#endif
4f0b6812 408 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
032c5e82 409 pool->next_free = (pool->next_free + 1) % pool->size;
4f0b6812
TF
410 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
411 i == count - 1) {
412 lpar_rc =
413 send_subcrq_indirect(adapter, handle,
414 (u64)ind_bufp->indir_dma,
415 (u64)ind_bufp->index);
416 if (lpar_rc != H_SUCCESS)
417 goto failure;
418 buffers_added += ind_bufp->index;
419 adapter->replenish_add_buff_success += ind_bufp->index;
420 ind_bufp->index = 0;
421 }
032c5e82
TF
422 }
423 atomic_add(buffers_added, &pool->available);
424 return;
425
426failure:
2d14d379
TF
427 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
428 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
4f0b6812
TF
429 for (i = ind_bufp->index - 1; i >= 0; --i) {
430 struct ibmvnic_rx_buff *rx_buff;
431
432 pool->next_free = pool->next_free == 0 ?
433 pool->size - 1 : pool->next_free - 1;
434 sub_crq = &ind_bufp->indir_arr[i];
435 rx_buff = (struct ibmvnic_rx_buff *)
436 be64_to_cpu(sub_crq->rx_add.correlator);
437 index = (int)(rx_buff - pool->rx_buff);
438 pool->free_map[pool->next_free] = index;
439 dev_kfree_skb_any(pool->rx_buff[index].skb);
440 pool->rx_buff[index].skb = NULL;
441 }
c2af6225
DB
442 adapter->replenish_add_buff_failure += ind_bufp->index;
443 atomic_add(buffers_added, &pool->available);
4f0b6812 444 ind_bufp->index = 0;
5a18e1e0 445 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
f185a49a 446 /* Disable buffer pool replenishment and report carrier off if
5a18e1e0
TF
447 * queue is closed or pending failover.
448 * Firmware guarantees that a signal will be sent to the
449 * driver, triggering a reset.
f185a49a
TF
450 */
451 deactivate_rx_pools(adapter);
452 netif_carrier_off(adapter->netdev);
453 }
032c5e82
TF
454}
455
456static void replenish_pools(struct ibmvnic_adapter *adapter)
457{
458 int i;
459
032c5e82 460 adapter->replenish_task_cycles++;
507ebe64 461 for (i = 0; i < adapter->num_active_rx_pools; i++) {
032c5e82
TF
462 if (adapter->rx_pool[i].active)
463 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
464 }
38bd5cec
SB
465
466 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
032c5e82
TF
467}
468
3d52b594
JA
469static void release_stats_buffers(struct ibmvnic_adapter *adapter)
470{
471 kfree(adapter->tx_stats_buffers);
472 kfree(adapter->rx_stats_buffers);
b0992eca
TF
473 adapter->tx_stats_buffers = NULL;
474 adapter->rx_stats_buffers = NULL;
3d52b594
JA
475}
476
477static int init_stats_buffers(struct ibmvnic_adapter *adapter)
478{
479 adapter->tx_stats_buffers =
abcae546 480 kcalloc(IBMVNIC_MAX_QUEUES,
3d52b594
JA
481 sizeof(struct ibmvnic_tx_queue_stats),
482 GFP_KERNEL);
483 if (!adapter->tx_stats_buffers)
484 return -ENOMEM;
485
486 adapter->rx_stats_buffers =
abcae546 487 kcalloc(IBMVNIC_MAX_QUEUES,
3d52b594
JA
488 sizeof(struct ibmvnic_rx_queue_stats),
489 GFP_KERNEL);
490 if (!adapter->rx_stats_buffers)
491 return -ENOMEM;
492
493 return 0;
494}
495
7bbc27a4
NF
496static void release_stats_token(struct ibmvnic_adapter *adapter)
497{
498 struct device *dev = &adapter->vdev->dev;
499
500 if (!adapter->stats_token)
501 return;
502
503 dma_unmap_single(dev, adapter->stats_token,
504 sizeof(struct ibmvnic_statistics),
505 DMA_FROM_DEVICE);
506 adapter->stats_token = 0;
507}
508
509static int init_stats_token(struct ibmvnic_adapter *adapter)
510{
511 struct device *dev = &adapter->vdev->dev;
512 dma_addr_t stok;
513
514 stok = dma_map_single(dev, &adapter->stats,
515 sizeof(struct ibmvnic_statistics),
516 DMA_FROM_DEVICE);
517 if (dma_mapping_error(dev, stok)) {
518 dev_err(dev, "Couldn't map stats buffer\n");
519 return -1;
520 }
521
522 adapter->stats_token = stok;
d1cf33d9 523 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
7bbc27a4
NF
524 return 0;
525}
526
8c0543ad
NF
527static int reset_rx_pools(struct ibmvnic_adapter *adapter)
528{
529 struct ibmvnic_rx_pool *rx_pool;
507ebe64 530 u64 buff_size;
8c0543ad 531 int rx_scrqs;
f3be0cbc 532 int i, j, rc;
896d8695 533
9f134573
MC
534 if (!adapter->rx_pool)
535 return -1;
536
507ebe64
TF
537 buff_size = adapter->cur_rx_buf_sz;
538 rx_scrqs = adapter->num_active_rx_pools;
8c0543ad
NF
539 for (i = 0; i < rx_scrqs; i++) {
540 rx_pool = &adapter->rx_pool[i];
541
d1cf33d9
NF
542 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
543
507ebe64 544 if (rx_pool->buff_size != buff_size) {
896d8695 545 free_long_term_buff(adapter, &rx_pool->long_term_buff);
9a87c3fc 546 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
7c940b1a
TF
547 rc = alloc_long_term_buff(adapter,
548 &rx_pool->long_term_buff,
549 rx_pool->size *
550 rx_pool->buff_size);
896d8695 551 } else {
0ec13aff
SB
552 rc = reset_long_term_buff(adapter,
553 &rx_pool->long_term_buff);
896d8695
JA
554 }
555
f3be0cbc
TF
556 if (rc)
557 return rc;
8c0543ad
NF
558
559 for (j = 0; j < rx_pool->size; j++)
560 rx_pool->free_map[j] = j;
561
562 memset(rx_pool->rx_buff, 0,
563 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
564
565 atomic_set(&rx_pool->available, 0);
566 rx_pool->next_alloc = 0;
567 rx_pool->next_free = 0;
c3e53b9a 568 rx_pool->active = 1;
8c0543ad
NF
569 }
570
571 return 0;
572}
573
0ffe2cb7 574static void release_rx_pools(struct ibmvnic_adapter *adapter)
032c5e82 575{
0ffe2cb7 576 struct ibmvnic_rx_pool *rx_pool;
0ffe2cb7 577 int i, j;
032c5e82 578
0ffe2cb7 579 if (!adapter->rx_pool)
032c5e82
TF
580 return;
581
82e3be32 582 for (i = 0; i < adapter->num_active_rx_pools; i++) {
0ffe2cb7
NF
583 rx_pool = &adapter->rx_pool[i];
584
d1cf33d9
NF
585 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
586
0ffe2cb7
NF
587 kfree(rx_pool->free_map);
588 free_long_term_buff(adapter, &rx_pool->long_term_buff);
589
590 if (!rx_pool->rx_buff)
e0ebe942 591 continue;
0ffe2cb7
NF
592
593 for (j = 0; j < rx_pool->size; j++) {
594 if (rx_pool->rx_buff[j].skb) {
b7cdec3d
TF
595 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
596 rx_pool->rx_buff[j].skb = NULL;
0ffe2cb7
NF
597 }
598 }
599
600 kfree(rx_pool->rx_buff);
601 }
602
603 kfree(adapter->rx_pool);
604 adapter->rx_pool = NULL;
82e3be32 605 adapter->num_active_rx_pools = 0;
0ffe2cb7
NF
606}
607
608static int init_rx_pools(struct net_device *netdev)
609{
610 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
611 struct device *dev = &adapter->vdev->dev;
612 struct ibmvnic_rx_pool *rx_pool;
613 int rxadd_subcrqs;
507ebe64 614 u64 buff_size;
0ffe2cb7
NF
615 int i, j;
616
507ebe64
TF
617 rxadd_subcrqs = adapter->num_active_rx_scrqs;
618 buff_size = adapter->cur_rx_buf_sz;
0ffe2cb7
NF
619
620 adapter->rx_pool = kcalloc(rxadd_subcrqs,
621 sizeof(struct ibmvnic_rx_pool),
622 GFP_KERNEL);
623 if (!adapter->rx_pool) {
624 dev_err(dev, "Failed to allocate rx pools\n");
625 return -1;
626 }
627
82e3be32
NF
628 adapter->num_active_rx_pools = rxadd_subcrqs;
629
0ffe2cb7
NF
630 for (i = 0; i < rxadd_subcrqs; i++) {
631 rx_pool = &adapter->rx_pool[i];
632
633 netdev_dbg(adapter->netdev,
d1cf33d9 634 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
0ffe2cb7 635 i, adapter->req_rx_add_entries_per_subcrq,
507ebe64 636 buff_size);
0ffe2cb7
NF
637
638 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
639 rx_pool->index = i;
9a87c3fc 640 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
0ffe2cb7
NF
641 rx_pool->active = 1;
642
643 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
644 GFP_KERNEL);
645 if (!rx_pool->free_map) {
646 release_rx_pools(adapter);
647 return -1;
032c5e82 648 }
0ffe2cb7
NF
649
650 rx_pool->rx_buff = kcalloc(rx_pool->size,
651 sizeof(struct ibmvnic_rx_buff),
652 GFP_KERNEL);
653 if (!rx_pool->rx_buff) {
654 dev_err(dev, "Couldn't alloc rx buffers\n");
655 release_rx_pools(adapter);
656 return -1;
657 }
658
659 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
660 rx_pool->size * rx_pool->buff_size)) {
661 release_rx_pools(adapter);
662 return -1;
663 }
664
665 for (j = 0; j < rx_pool->size; ++j)
666 rx_pool->free_map[j] = j;
667
668 atomic_set(&rx_pool->available, 0);
669 rx_pool->next_alloc = 0;
670 rx_pool->next_free = 0;
032c5e82 671 }
0ffe2cb7
NF
672
673 return 0;
032c5e82
TF
674}
675
0ec13aff
SB
676static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
677 struct ibmvnic_tx_pool *tx_pool)
e26dc25b
TF
678{
679 int rc, i;
680
0ec13aff 681 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
e26dc25b
TF
682 if (rc)
683 return rc;
684
685 memset(tx_pool->tx_buff, 0,
686 tx_pool->num_buffers *
687 sizeof(struct ibmvnic_tx_buff));
688
689 for (i = 0; i < tx_pool->num_buffers; i++)
690 tx_pool->free_map[i] = i;
691
692 tx_pool->consumer_index = 0;
693 tx_pool->producer_index = 0;
694
695 return 0;
696}
697
8c0543ad
NF
698static int reset_tx_pools(struct ibmvnic_adapter *adapter)
699{
8c0543ad 700 int tx_scrqs;
e26dc25b 701 int i, rc;
8c0543ad 702
9f134573
MC
703 if (!adapter->tx_pool)
704 return -1;
705
507ebe64 706 tx_scrqs = adapter->num_active_tx_pools;
8c0543ad 707 for (i = 0; i < tx_scrqs; i++) {
65d6470d 708 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
0ec13aff 709 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
f3be0cbc
TF
710 if (rc)
711 return rc;
0ec13aff 712 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
fdb06105
TF
713 if (rc)
714 return rc;
8c0543ad
NF
715 }
716
717 return 0;
718}
719
4e6759be
DANR
720static void release_vpd_data(struct ibmvnic_adapter *adapter)
721{
722 if (!adapter->vpd)
723 return;
724
725 kfree(adapter->vpd->buff);
726 kfree(adapter->vpd);
b0992eca
TF
727
728 adapter->vpd = NULL;
4e6759be
DANR
729}
730
fb79421c
TF
731static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
732 struct ibmvnic_tx_pool *tx_pool)
733{
734 kfree(tx_pool->tx_buff);
735 kfree(tx_pool->free_map);
736 free_long_term_buff(adapter, &tx_pool->long_term_buff);
737}
738
c657e32c
NF
739static void release_tx_pools(struct ibmvnic_adapter *adapter)
740{
896d8695 741 int i;
c657e32c
NF
742
743 if (!adapter->tx_pool)
744 return;
745
82e3be32 746 for (i = 0; i < adapter->num_active_tx_pools; i++) {
fb79421c
TF
747 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
748 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
c657e32c
NF
749 }
750
751 kfree(adapter->tx_pool);
752 adapter->tx_pool = NULL;
fb79421c
TF
753 kfree(adapter->tso_pool);
754 adapter->tso_pool = NULL;
82e3be32 755 adapter->num_active_tx_pools = 0;
c657e32c
NF
756}
757
3205306c
TF
758static int init_one_tx_pool(struct net_device *netdev,
759 struct ibmvnic_tx_pool *tx_pool,
760 int num_entries, int buf_size)
761{
762 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
763 int i;
764
765 tx_pool->tx_buff = kcalloc(num_entries,
766 sizeof(struct ibmvnic_tx_buff),
767 GFP_KERNEL);
768 if (!tx_pool->tx_buff)
769 return -1;
770
771 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
772 num_entries * buf_size))
773 return -1;
774
775 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
776 if (!tx_pool->free_map)
777 return -1;
778
779 for (i = 0; i < num_entries; i++)
780 tx_pool->free_map[i] = i;
781
782 tx_pool->consumer_index = 0;
783 tx_pool->producer_index = 0;
784 tx_pool->num_buffers = num_entries;
785 tx_pool->buf_size = buf_size;
786
787 return 0;
788}
789
c657e32c
NF
790static int init_tx_pools(struct net_device *netdev)
791{
792 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
c657e32c 793 int tx_subcrqs;
9a87c3fc 794 u64 buff_size;
3205306c 795 int i, rc;
c657e32c 796
507ebe64 797 tx_subcrqs = adapter->num_active_tx_scrqs;
c657e32c
NF
798 adapter->tx_pool = kcalloc(tx_subcrqs,
799 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
800 if (!adapter->tx_pool)
801 return -1;
802
3205306c
TF
803 adapter->tso_pool = kcalloc(tx_subcrqs,
804 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
f6ebca8e
SB
805 if (!adapter->tso_pool) {
806 kfree(adapter->tx_pool);
807 adapter->tx_pool = NULL;
3205306c 808 return -1;
f6ebca8e 809 }
3205306c 810
82e3be32
NF
811 adapter->num_active_tx_pools = tx_subcrqs;
812
c657e32c 813 for (i = 0; i < tx_subcrqs; i++) {
9a87c3fc
DB
814 buff_size = adapter->req_mtu + VLAN_HLEN;
815 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
3205306c
TF
816 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
817 adapter->req_tx_entries_per_subcrq,
9a87c3fc 818 buff_size);
3205306c 819 if (rc) {
c657e32c 820 release_tx_pools(adapter);
3205306c 821 return rc;
fdb06105
TF
822 }
823
7c940b1a
TF
824 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
825 IBMVNIC_TSO_BUFS,
826 IBMVNIC_TSO_BUF_SZ);
3205306c 827 if (rc) {
c657e32c 828 release_tx_pools(adapter);
3205306c 829 return rc;
c657e32c 830 }
c657e32c
NF
831 }
832
833 return 0;
834}
835
d944c3d6
JA
836static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
837{
838 int i;
839
840 if (adapter->napi_enabled)
841 return;
842
843 for (i = 0; i < adapter->req_rx_queues; i++)
844 napi_enable(&adapter->napi[i]);
845
846 adapter->napi_enabled = true;
847}
848
849static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
850{
851 int i;
852
853 if (!adapter->napi_enabled)
854 return;
855
d1cf33d9
NF
856 for (i = 0; i < adapter->req_rx_queues; i++) {
857 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
d944c3d6 858 napi_disable(&adapter->napi[i]);
d1cf33d9 859 }
d944c3d6
JA
860
861 adapter->napi_enabled = false;
862}
863
86f669b2
NF
864static int init_napi(struct ibmvnic_adapter *adapter)
865{
866 int i;
867
868 adapter->napi = kcalloc(adapter->req_rx_queues,
869 sizeof(struct napi_struct), GFP_KERNEL);
870 if (!adapter->napi)
871 return -ENOMEM;
872
873 for (i = 0; i < adapter->req_rx_queues; i++) {
874 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
875 netif_napi_add(adapter->netdev, &adapter->napi[i],
876 ibmvnic_poll, NAPI_POLL_WEIGHT);
877 }
878
82e3be32 879 adapter->num_active_rx_napi = adapter->req_rx_queues;
86f669b2
NF
880 return 0;
881}
882
883static void release_napi(struct ibmvnic_adapter *adapter)
884{
885 int i;
886
887 if (!adapter->napi)
888 return;
889
82e3be32 890 for (i = 0; i < adapter->num_active_rx_napi; i++) {
390de194
WY
891 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
892 netif_napi_del(&adapter->napi[i]);
86f669b2
NF
893 }
894
895 kfree(adapter->napi);
896 adapter->napi = NULL;
82e3be32 897 adapter->num_active_rx_napi = 0;
c3f22415 898 adapter->napi_enabled = false;
86f669b2
NF
899}
900
0666ef7f
LP
901static const char *adapter_state_to_string(enum vnic_state state)
902{
903 switch (state) {
904 case VNIC_PROBING:
905 return "PROBING";
906 case VNIC_PROBED:
907 return "PROBED";
908 case VNIC_OPENING:
909 return "OPENING";
910 case VNIC_OPEN:
911 return "OPEN";
912 case VNIC_CLOSING:
913 return "CLOSING";
914 case VNIC_CLOSED:
915 return "CLOSED";
916 case VNIC_REMOVING:
917 return "REMOVING";
918 case VNIC_REMOVED:
919 return "REMOVED";
822ebc2c
LP
920 case VNIC_DOWN:
921 return "DOWN";
0666ef7f 922 }
07b5dc1d 923 return "UNKNOWN";
0666ef7f
LP
924}
925
a57a5d25 926static int ibmvnic_login(struct net_device *netdev)
032c5e82
TF
927{
928 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
98c41f04 929 unsigned long timeout = msecs_to_jiffies(20000);
64d92aa2 930 int retry_count = 0;
dff515a3 931 int retries = 10;
eb110410 932 bool retry;
4d96f12a 933 int rc;
032c5e82 934
bd0b6723 935 do {
eb110410 936 retry = false;
dff515a3 937 if (retry_count > retries) {
64d92aa2
NF
938 netdev_warn(netdev, "Login attempts exceeded\n");
939 return -1;
940 }
941
942 adapter->init_done_rc = 0;
943 reinit_completion(&adapter->init_done);
944 rc = send_login(adapter);
c98d9cc4 945 if (rc)
64d92aa2 946 return rc;
64d92aa2
NF
947
948 if (!wait_for_completion_timeout(&adapter->init_done,
949 timeout)) {
dff515a3
TF
950 netdev_warn(netdev, "Login timed out, retrying...\n");
951 retry = true;
952 adapter->init_done_rc = 0;
953 retry_count++;
954 continue;
64d92aa2
NF
955 }
956
dff515a3
TF
957 if (adapter->init_done_rc == ABORTED) {
958 netdev_warn(netdev, "Login aborted, retrying...\n");
959 retry = true;
960 adapter->init_done_rc = 0;
961 retry_count++;
962 /* FW or device may be busy, so
963 * wait a bit before retrying login
964 */
965 msleep(500);
966 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
64d92aa2 967 retry_count++;
d7c0ef36 968 release_sub_crqs(adapter, 1);
bd0b6723 969
eb110410
TF
970 retry = true;
971 netdev_dbg(netdev,
972 "Received partial success, retrying...\n");
64d92aa2 973 adapter->init_done_rc = 0;
bd0b6723 974 reinit_completion(&adapter->init_done);
491099ad 975 send_query_cap(adapter);
bd0b6723
JA
976 if (!wait_for_completion_timeout(&adapter->init_done,
977 timeout)) {
64d92aa2
NF
978 netdev_warn(netdev,
979 "Capabilities query timed out\n");
bd0b6723
JA
980 return -1;
981 }
64d92aa2 982
4d96f12a
TF
983 rc = init_sub_crqs(adapter);
984 if (rc) {
64d92aa2
NF
985 netdev_warn(netdev,
986 "SCRQ initialization failed\n");
4d96f12a
TF
987 return -1;
988 }
64d92aa2 989
4d96f12a
TF
990 rc = init_sub_crq_irqs(adapter);
991 if (rc) {
64d92aa2
NF
992 netdev_warn(netdev,
993 "SCRQ irq initialization failed\n");
4d96f12a
TF
994 return -1;
995 }
64d92aa2
NF
996 } else if (adapter->init_done_rc) {
997 netdev_warn(netdev, "Adapter login failed\n");
bd0b6723
JA
998 return -1;
999 }
eb110410 1000 } while (retry);
bd0b6723 1001
62740e97 1002 __ibmvnic_set_mac(netdev, adapter->mac_addr);
3d166130 1003
0666ef7f 1004 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
a57a5d25
JA
1005 return 0;
1006}
1007
34f0f4e3
TF
1008static void release_login_buffer(struct ibmvnic_adapter *adapter)
1009{
1010 kfree(adapter->login_buf);
1011 adapter->login_buf = NULL;
1012}
1013
1014static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1015{
1016 kfree(adapter->login_rsp_buf);
1017 adapter->login_rsp_buf = NULL;
1018}
1019
1b8955ee
NF
1020static void release_resources(struct ibmvnic_adapter *adapter)
1021{
4e6759be
DANR
1022 release_vpd_data(adapter);
1023
1b8955ee
NF
1024 release_tx_pools(adapter);
1025 release_rx_pools(adapter);
1026
86f669b2 1027 release_napi(adapter);
a0c8be56 1028 release_login_buffer(adapter);
34f0f4e3 1029 release_login_rsp_buffer(adapter);
1b8955ee
NF
1030}
1031
53da09e9
NF
1032static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1033{
1034 struct net_device *netdev = adapter->netdev;
98c41f04 1035 unsigned long timeout = msecs_to_jiffies(20000);
53da09e9
NF
1036 union ibmvnic_crq crq;
1037 bool resend;
1038 int rc;
1039
d1cf33d9
NF
1040 netdev_dbg(netdev, "setting link state %d\n", link_state);
1041
53da09e9
NF
1042 memset(&crq, 0, sizeof(crq));
1043 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1044 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1045 crq.logical_link_state.link_state = link_state;
1046
1047 do {
1048 resend = false;
1049
1050 reinit_completion(&adapter->init_done);
1051 rc = ibmvnic_send_crq(adapter, &crq);
1052 if (rc) {
1053 netdev_err(netdev, "Failed to set link state\n");
1054 return rc;
1055 }
1056
1057 if (!wait_for_completion_timeout(&adapter->init_done,
1058 timeout)) {
1059 netdev_err(netdev, "timeout setting link state\n");
1060 return -1;
1061 }
1062
4c5f6af0 1063 if (adapter->init_done_rc == PARTIALSUCCESS) {
53da09e9
NF
1064 /* Partuial success, delay and re-send */
1065 mdelay(1000);
1066 resend = true;
ab5ec33b
TF
1067 } else if (adapter->init_done_rc) {
1068 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1069 adapter->init_done_rc);
1070 return adapter->init_done_rc;
53da09e9
NF
1071 }
1072 } while (resend);
1073
1074 return 0;
1075}
1076
7f3c6e6b
TF
1077static int set_real_num_queues(struct net_device *netdev)
1078{
1079 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1080 int rc;
1081
d1cf33d9
NF
1082 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1083 adapter->req_tx_queues, adapter->req_rx_queues);
1084
7f3c6e6b
TF
1085 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1086 if (rc) {
1087 netdev_err(netdev, "failed to set the number of tx queues\n");
1088 return rc;
1089 }
1090
1091 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1092 if (rc)
1093 netdev_err(netdev, "failed to set the number of rx queues\n");
1094
1095 return rc;
1096}
1097
4e6759be
DANR
1098static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1099{
1100 struct device *dev = &adapter->vdev->dev;
1101 union ibmvnic_crq crq;
4e6759be 1102 int len = 0;
9c4eaabd 1103 int rc;
4e6759be
DANR
1104
1105 if (adapter->vpd->buff)
1106 len = adapter->vpd->len;
1107
ff25dcb9
TF
1108 mutex_lock(&adapter->fw_lock);
1109 adapter->fw_done_rc = 0;
070eca95 1110 reinit_completion(&adapter->fw_done);
ff25dcb9 1111
4e6759be
DANR
1112 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1113 crq.get_vpd_size.cmd = GET_VPD_SIZE;
9c4eaabd 1114 rc = ibmvnic_send_crq(adapter, &crq);
ff25dcb9
TF
1115 if (rc) {
1116 mutex_unlock(&adapter->fw_lock);
9c4eaabd 1117 return rc;
ff25dcb9 1118 }
476d96ca
TF
1119
1120 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1121 if (rc) {
1122 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
ff25dcb9 1123 mutex_unlock(&adapter->fw_lock);
476d96ca
TF
1124 return rc;
1125 }
ff25dcb9 1126 mutex_unlock(&adapter->fw_lock);
4e6759be
DANR
1127
1128 if (!adapter->vpd->len)
1129 return -ENODATA;
1130
1131 if (!adapter->vpd->buff)
1132 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1133 else if (adapter->vpd->len != len)
1134 adapter->vpd->buff =
1135 krealloc(adapter->vpd->buff,
1136 adapter->vpd->len, GFP_KERNEL);
1137
1138 if (!adapter->vpd->buff) {
1139 dev_err(dev, "Could allocate VPD buffer\n");
1140 return -ENOMEM;
1141 }
1142
1143 adapter->vpd->dma_addr =
1144 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1145 DMA_FROM_DEVICE);
f743106e 1146 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
4e6759be
DANR
1147 dev_err(dev, "Could not map VPD buffer\n");
1148 kfree(adapter->vpd->buff);
b0992eca 1149 adapter->vpd->buff = NULL;
4e6759be
DANR
1150 return -ENOMEM;
1151 }
1152
ff25dcb9
TF
1153 mutex_lock(&adapter->fw_lock);
1154 adapter->fw_done_rc = 0;
4e6759be 1155 reinit_completion(&adapter->fw_done);
ff25dcb9 1156
4e6759be
DANR
1157 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1158 crq.get_vpd.cmd = GET_VPD;
1159 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1160 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
9c4eaabd
TF
1161 rc = ibmvnic_send_crq(adapter, &crq);
1162 if (rc) {
1163 kfree(adapter->vpd->buff);
1164 adapter->vpd->buff = NULL;
ff25dcb9 1165 mutex_unlock(&adapter->fw_lock);
9c4eaabd
TF
1166 return rc;
1167 }
476d96ca
TF
1168
1169 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1170 if (rc) {
1171 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1172 kfree(adapter->vpd->buff);
1173 adapter->vpd->buff = NULL;
ff25dcb9 1174 mutex_unlock(&adapter->fw_lock);
476d96ca
TF
1175 return rc;
1176 }
4e6759be 1177
ff25dcb9 1178 mutex_unlock(&adapter->fw_lock);
4e6759be
DANR
1179 return 0;
1180}
1181
bfc32f29 1182static int init_resources(struct ibmvnic_adapter *adapter)
a57a5d25 1183{
bfc32f29 1184 struct net_device *netdev = adapter->netdev;
86f669b2 1185 int rc;
a57a5d25 1186
7f3c6e6b
TF
1187 rc = set_real_num_queues(netdev);
1188 if (rc)
1189 return rc;
bd0b6723 1190
4e6759be
DANR
1191 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1192 if (!adapter->vpd)
1193 return -ENOMEM;
1194
69d08dcb
JA
1195 /* Vital Product Data (VPD) */
1196 rc = ibmvnic_get_vpd(adapter);
1197 if (rc) {
1198 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1199 return rc;
1200 }
1201
032c5e82 1202 adapter->map_id = 1;
bfc32f29 1203
86f669b2
NF
1204 rc = init_napi(adapter);
1205 if (rc)
1206 return rc;
032c5e82 1207
69980d02 1208 send_query_map(adapter);
0ffe2cb7
NF
1209
1210 rc = init_rx_pools(netdev);
1211 if (rc)
bfc32f29 1212 return rc;
032c5e82 1213
c657e32c 1214 rc = init_tx_pools(netdev);
bfc32f29
NF
1215 return rc;
1216}
1217
ed651a10 1218static int __ibmvnic_open(struct net_device *netdev)
bfc32f29
NF
1219{
1220 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
ed651a10 1221 enum vnic_state prev_state = adapter->state;
bfc32f29
NF
1222 int i, rc;
1223
90c8014c 1224 adapter->state = VNIC_OPENING;
032c5e82 1225 replenish_pools(adapter);
d944c3d6 1226 ibmvnic_napi_enable(adapter);
bfc32f29 1227
032c5e82
TF
1228 /* We're ready to receive frames, enable the sub-crq interrupts and
1229 * set the logical link state to up
1230 */
ed651a10 1231 for (i = 0; i < adapter->req_rx_queues; i++) {
d1cf33d9 1232 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
ed651a10
NF
1233 if (prev_state == VNIC_CLOSED)
1234 enable_irq(adapter->rx_scrq[i]->irq);
f23e0643 1235 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
ed651a10 1236 }
032c5e82 1237
ed651a10 1238 for (i = 0; i < adapter->req_tx_queues; i++) {
d1cf33d9 1239 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
ed651a10
NF
1240 if (prev_state == VNIC_CLOSED)
1241 enable_irq(adapter->tx_scrq[i]->irq);
f23e0643 1242 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
0d973388 1243 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
ed651a10 1244 }
032c5e82 1245
53da09e9 1246 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
bfc32f29 1247 if (rc) {
0775ebc4 1248 ibmvnic_napi_disable(adapter);
8b41d367 1249 ibmvnic_disable_irqs(adapter);
ed651a10 1250 return rc;
bfc32f29 1251 }
032c5e82 1252
ed651a10
NF
1253 netif_tx_start_all_queues(netdev);
1254
2ca220f9
DM
1255 if (prev_state == VNIC_CLOSED) {
1256 for (i = 0; i < adapter->req_rx_queues; i++)
1257 napi_schedule(&adapter->napi[i]);
1258 }
1259
ed651a10
NF
1260 adapter->state = VNIC_OPEN;
1261 return rc;
1262}
1263
1264static int ibmvnic_open(struct net_device *netdev)
1265{
1266 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
69d08dcb 1267 int rc;
ed651a10 1268
8f1c0fd2
SB
1269 ASSERT_RTNL();
1270
1271 /* If device failover is pending or we are about to reset, just set
1272 * device state and return. Device operation will be handled by reset
1273 * routine.
1274 *
1275 * It should be safe to overwrite the adapter->state here. Since
1276 * we hold the rtnl, either the reset has not actually started or
1277 * the rtnl got dropped during the set_link_state() in do_reset().
1278 * In the former case, no one else is changing the state (again we
1279 * have the rtnl) and in the latter case, do_reset() will detect and
1280 * honor our setting below.
5a18e1e0 1281 */
8f1c0fd2 1282 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
0666ef7f
LP
1283 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1284 adapter_state_to_string(adapter->state),
1285 adapter->failover_pending);
5a18e1e0 1286 adapter->state = VNIC_OPEN;
8f1c0fd2
SB
1287 rc = 0;
1288 goto out;
5a18e1e0
TF
1289 }
1290
ed651a10
NF
1291 if (adapter->state != VNIC_CLOSED) {
1292 rc = ibmvnic_login(netdev);
a5681e20 1293 if (rc)
1d850493 1294 goto out;
ed651a10
NF
1295
1296 rc = init_resources(adapter);
1297 if (rc) {
1298 netdev_err(netdev, "failed to initialize resources\n");
1d850493 1299 goto out;
ed651a10
NF
1300 }
1301 }
1302
1303 rc = __ibmvnic_open(netdev);
4e6759be 1304
1d850493 1305out:
8f1c0fd2
SB
1306 /* If open failed and there is a pending failover or in-progress reset,
1307 * set device state and return. Device operation will be handled by
1308 * reset routine. See also comments above regarding rtnl.
1d850493 1309 */
8f1c0fd2
SB
1310 if (rc &&
1311 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1d850493
SB
1312 adapter->state = VNIC_OPEN;
1313 rc = 0;
1314 }
8b41d367
SB
1315
1316 if (rc) {
1317 release_resources(adapter);
1318 }
1319
bfc32f29 1320 return rc;
032c5e82
TF
1321}
1322
d0869c00
TF
1323static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1324{
1325 struct ibmvnic_rx_pool *rx_pool;
637f81d1 1326 struct ibmvnic_rx_buff *rx_buff;
d0869c00
TF
1327 u64 rx_entries;
1328 int rx_scrqs;
1329 int i, j;
1330
1331 if (!adapter->rx_pool)
1332 return;
1333
660e309d 1334 rx_scrqs = adapter->num_active_rx_pools;
d0869c00
TF
1335 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1336
1337 /* Free any remaining skbs in the rx buffer pools */
1338 for (i = 0; i < rx_scrqs; i++) {
1339 rx_pool = &adapter->rx_pool[i];
637f81d1 1340 if (!rx_pool || !rx_pool->rx_buff)
d0869c00
TF
1341 continue;
1342
1343 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1344 for (j = 0; j < rx_entries; j++) {
637f81d1
TF
1345 rx_buff = &rx_pool->rx_buff[j];
1346 if (rx_buff && rx_buff->skb) {
1347 dev_kfree_skb_any(rx_buff->skb);
1348 rx_buff->skb = NULL;
d0869c00
TF
1349 }
1350 }
1351 }
1352}
1353
e9e1e978
TF
1354static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1355 struct ibmvnic_tx_pool *tx_pool)
b41b83e9 1356{
637f81d1 1357 struct ibmvnic_tx_buff *tx_buff;
b41b83e9 1358 u64 tx_entries;
e9e1e978
TF
1359 int i;
1360
050e85c9 1361 if (!tx_pool || !tx_pool->tx_buff)
e9e1e978
TF
1362 return;
1363
1364 tx_entries = tx_pool->num_buffers;
1365
1366 for (i = 0; i < tx_entries; i++) {
1367 tx_buff = &tx_pool->tx_buff[i];
1368 if (tx_buff && tx_buff->skb) {
1369 dev_kfree_skb_any(tx_buff->skb);
1370 tx_buff->skb = NULL;
1371 }
1372 }
1373}
1374
1375static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1376{
b41b83e9 1377 int tx_scrqs;
e9e1e978 1378 int i;
b41b83e9 1379
e9e1e978 1380 if (!adapter->tx_pool || !adapter->tso_pool)
b41b83e9
NF
1381 return;
1382
660e309d 1383 tx_scrqs = adapter->num_active_tx_pools;
b41b83e9
NF
1384
1385 /* Free any remaining skbs in the tx buffer pools */
1386 for (i = 0; i < tx_scrqs; i++) {
d1cf33d9 1387 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
e9e1e978
TF
1388 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1389 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
b41b83e9
NF
1390 }
1391}
1392
6095e590 1393static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
ea5509f5 1394{
6095e590 1395 struct net_device *netdev = adapter->netdev;
ea5509f5
JA
1396 int i;
1397
46293b94
NF
1398 if (adapter->tx_scrq) {
1399 for (i = 0; i < adapter->req_tx_queues; i++)
d1cf33d9 1400 if (adapter->tx_scrq[i]->irq) {
f873866a 1401 netdev_dbg(netdev,
d1cf33d9 1402 "Disabling tx_scrq[%d] irq\n", i);
f23e0643 1403 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
46293b94 1404 disable_irq(adapter->tx_scrq[i]->irq);
d1cf33d9 1405 }
46293b94
NF
1406 }
1407
46293b94
NF
1408 if (adapter->rx_scrq) {
1409 for (i = 0; i < adapter->req_rx_queues; i++) {
d1cf33d9 1410 if (adapter->rx_scrq[i]->irq) {
f873866a 1411 netdev_dbg(netdev,
d1cf33d9 1412 "Disabling rx_scrq[%d] irq\n", i);
f23e0643 1413 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
46293b94 1414 disable_irq(adapter->rx_scrq[i]->irq);
d1cf33d9 1415 }
46293b94
NF
1416 }
1417 }
6095e590
JA
1418}
1419
1420static void ibmvnic_cleanup(struct net_device *netdev)
1421{
1422 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1423
1424 /* ensure that transmissions are stopped if called by do_reset */
7ed5b31f 1425 if (test_bit(0, &adapter->resetting))
6095e590
JA
1426 netif_tx_disable(netdev);
1427 else
1428 netif_tx_stop_all_queues(netdev);
1429
1430 ibmvnic_napi_disable(adapter);
1431 ibmvnic_disable_irqs(adapter);
1432
d0869c00 1433 clean_rx_pools(adapter);
10f76215 1434 clean_tx_pools(adapter);
01d9bd79
TF
1435}
1436
1437static int __ibmvnic_close(struct net_device *netdev)
1438{
1439 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1440 int rc = 0;
1441
1442 adapter->state = VNIC_CLOSING;
1443 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
90c8014c 1444 adapter->state = VNIC_CLOSED;
d4083d3c 1445 return rc;
032c5e82
TF
1446}
1447
ed651a10
NF
1448static int ibmvnic_close(struct net_device *netdev)
1449{
1450 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1451 int rc;
1452
0666ef7f
LP
1453 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1454 adapter_state_to_string(adapter->state),
1455 adapter->failover_pending,
38bd5cec
SB
1456 adapter->force_reset_recovery);
1457
5a18e1e0
TF
1458 /* If device failover is pending, just set device state and return.
1459 * Device operation will be handled by reset routine.
1460 */
1461 if (adapter->failover_pending) {
1462 adapter->state = VNIC_CLOSED;
1463 return 0;
1464 }
1465
ed651a10 1466 rc = __ibmvnic_close(netdev);
30f79625 1467 ibmvnic_cleanup(netdev);
ed651a10
NF
1468
1469 return rc;
1470}
1471
ad7775dc
TF
1472/**
1473 * build_hdr_data - creates L2/L3/L4 header data buffer
80708602
LJ
1474 * @hdr_field: bitfield determining needed headers
1475 * @skb: socket buffer
1476 * @hdr_len: array of header lengths
1477 * @hdr_data: buffer to write the header to
ad7775dc
TF
1478 *
1479 * Reads hdr_field to determine which headers are needed by firmware.
1480 * Builds a buffer containing these headers. Saves individual header
1481 * lengths and total buffer length to be used to build descriptors.
1482 */
1483static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1484 int *hdr_len, u8 *hdr_data)
1485{
1486 int len = 0;
1487 u8 *hdr;
1488
da75e3b6
TF
1489 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1490 hdr_len[0] = sizeof(struct vlan_ethhdr);
1491 else
1492 hdr_len[0] = sizeof(struct ethhdr);
ad7775dc
TF
1493
1494 if (skb->protocol == htons(ETH_P_IP)) {
1495 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1496 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1497 hdr_len[2] = tcp_hdrlen(skb);
1498 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1499 hdr_len[2] = sizeof(struct udphdr);
1500 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1501 hdr_len[1] = sizeof(struct ipv6hdr);
1502 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1503 hdr_len[2] = tcp_hdrlen(skb);
1504 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1505 hdr_len[2] = sizeof(struct udphdr);
4eb50ceb
TF
1506 } else if (skb->protocol == htons(ETH_P_ARP)) {
1507 hdr_len[1] = arp_hdr_len(skb->dev);
1508 hdr_len[2] = 0;
ad7775dc
TF
1509 }
1510
1511 memset(hdr_data, 0, 120);
1512 if ((hdr_field >> 6) & 1) {
1513 hdr = skb_mac_header(skb);
1514 memcpy(hdr_data, hdr, hdr_len[0]);
1515 len += hdr_len[0];
1516 }
1517
1518 if ((hdr_field >> 5) & 1) {
1519 hdr = skb_network_header(skb);
1520 memcpy(hdr_data + len, hdr, hdr_len[1]);
1521 len += hdr_len[1];
1522 }
1523
1524 if ((hdr_field >> 4) & 1) {
1525 hdr = skb_transport_header(skb);
1526 memcpy(hdr_data + len, hdr, hdr_len[2]);
1527 len += hdr_len[2];
1528 }
1529 return len;
1530}
1531
1532/**
1533 * create_hdr_descs - create header and header extension descriptors
80708602
LJ
1534 * @hdr_field: bitfield determining needed headers
1535 * @hdr_data: buffer containing header data
1536 * @len: length of data buffer
1537 * @hdr_len: array of individual header lengths
1538 * @scrq_arr: descriptor array
ad7775dc
TF
1539 *
1540 * Creates header and, if needed, header extension descriptors and
1541 * places them in a descriptor array, scrq_arr
1542 */
1543
2de09681
TF
1544static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1545 union sub_crq *scrq_arr)
ad7775dc
TF
1546{
1547 union sub_crq hdr_desc;
1548 int tmp_len = len;
2de09681 1549 int num_descs = 0;
ad7775dc
TF
1550 u8 *data, *cur;
1551 int tmp;
1552
1553 while (tmp_len > 0) {
1554 cur = hdr_data + len - tmp_len;
1555
1556 memset(&hdr_desc, 0, sizeof(hdr_desc));
1557 if (cur != hdr_data) {
1558 data = hdr_desc.hdr_ext.data;
1559 tmp = tmp_len > 29 ? 29 : tmp_len;
1560 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1561 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1562 hdr_desc.hdr_ext.len = tmp;
1563 } else {
1564 data = hdr_desc.hdr.data;
1565 tmp = tmp_len > 24 ? 24 : tmp_len;
1566 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1567 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1568 hdr_desc.hdr.len = tmp;
1569 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1570 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1571 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1572 hdr_desc.hdr.flag = hdr_field << 1;
1573 }
1574 memcpy(data, cur, tmp);
1575 tmp_len -= tmp;
1576 *scrq_arr = hdr_desc;
1577 scrq_arr++;
2de09681 1578 num_descs++;
ad7775dc 1579 }
2de09681
TF
1580
1581 return num_descs;
ad7775dc
TF
1582}
1583
1584/**
1585 * build_hdr_descs_arr - build a header descriptor array
73214a69
LP
1586 * @skb: tx socket buffer
1587 * @indir_arr: indirect array
80708602
LJ
1588 * @num_entries: number of descriptors to be sent
1589 * @hdr_field: bit field determining which headers will be sent
ad7775dc
TF
1590 *
1591 * This function will build a TX descriptor array with applicable
1592 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1593 */
1594
c62aa373
TF
1595static void build_hdr_descs_arr(struct sk_buff *skb,
1596 union sub_crq *indir_arr,
ad7775dc
TF
1597 int *num_entries, u8 hdr_field)
1598{
1599 int hdr_len[3] = {0, 0, 0};
c62aa373 1600 u8 hdr_data[140] = {0};
2de09681 1601 int tot_len;
ad7775dc 1602
c62aa373
TF
1603 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1604 hdr_data);
2de09681 1605 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
c62aa373 1606 indir_arr + 1);
ad7775dc
TF
1607}
1608
1f247a6f
TF
1609static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1610 struct net_device *netdev)
1611{
1612 /* For some backing devices, mishandling of small packets
1613 * can result in a loss of connection or TX stall. Device
1614 * architects recommend that no packet should be smaller
1615 * than the minimum MTU value provided to the driver, so
1616 * pad any packets to that length
1617 */
1618 if (skb->len < netdev->min_mtu)
1619 return skb_put_padto(skb, netdev->min_mtu);
7083a45a
TF
1620
1621 return 0;
1f247a6f
TF
1622}
1623
0d973388
TF
1624static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1625 struct ibmvnic_sub_crq_queue *tx_scrq)
1626{
1627 struct ibmvnic_ind_xmit_queue *ind_bufp;
1628 struct ibmvnic_tx_buff *tx_buff;
1629 struct ibmvnic_tx_pool *tx_pool;
1630 union sub_crq tx_scrq_entry;
1631 int queue_num;
1632 int entries;
1633 int index;
1634 int i;
1635
1636 ind_bufp = &tx_scrq->ind_buf;
1637 entries = (u64)ind_bufp->index;
1638 queue_num = tx_scrq->pool_index;
1639
1640 for (i = entries - 1; i >= 0; --i) {
1641 tx_scrq_entry = ind_bufp->indir_arr[i];
1642 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1643 continue;
1644 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1645 if (index & IBMVNIC_TSO_POOL_MASK) {
1646 tx_pool = &adapter->tso_pool[queue_num];
1647 index &= ~IBMVNIC_TSO_POOL_MASK;
1648 } else {
1649 tx_pool = &adapter->tx_pool[queue_num];
1650 }
1651 tx_pool->free_map[tx_pool->consumer_index] = index;
1652 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1653 tx_pool->num_buffers - 1 :
1654 tx_pool->consumer_index - 1;
1655 tx_buff = &tx_pool->tx_buff[index];
1656 adapter->netdev->stats.tx_packets--;
1657 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1658 adapter->tx_stats_buffers[queue_num].packets--;
1659 adapter->tx_stats_buffers[queue_num].bytes -=
1660 tx_buff->skb->len;
1661 dev_kfree_skb_any(tx_buff->skb);
1662 tx_buff->skb = NULL;
1663 adapter->netdev->stats.tx_dropped++;
1664 }
1665 ind_bufp->index = 0;
1666 if (atomic_sub_return(entries, &tx_scrq->used) <=
1667 (adapter->req_tx_entries_per_subcrq / 2) &&
65d6470d
SB
1668 __netif_subqueue_stopped(adapter->netdev, queue_num) &&
1669 !test_bit(0, &adapter->resetting)) {
0d973388
TF
1670 netif_wake_subqueue(adapter->netdev, queue_num);
1671 netdev_dbg(adapter->netdev, "Started queue %d\n",
1672 queue_num);
1673 }
1674}
1675
1676static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1677 struct ibmvnic_sub_crq_queue *tx_scrq)
1678{
1679 struct ibmvnic_ind_xmit_queue *ind_bufp;
1680 u64 dma_addr;
1681 u64 entries;
1682 u64 handle;
1683 int rc;
1684
1685 ind_bufp = &tx_scrq->ind_buf;
1686 dma_addr = (u64)ind_bufp->indir_dma;
1687 entries = (u64)ind_bufp->index;
1688 handle = tx_scrq->handle;
1689
1690 if (!entries)
1691 return 0;
1692 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1693 if (rc)
1694 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1695 else
1696 ind_bufp->index = 0;
1697 return 0;
1698}
1699
94b2bb28 1700static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
032c5e82
TF
1701{
1702 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1703 int queue_num = skb_get_queue_mapping(skb);
ad7775dc 1704 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
032c5e82 1705 struct device *dev = &adapter->vdev->dev;
0d973388 1706 struct ibmvnic_ind_xmit_queue *ind_bufp;
032c5e82 1707 struct ibmvnic_tx_buff *tx_buff = NULL;
142c0ac4 1708 struct ibmvnic_sub_crq_queue *tx_scrq;
032c5e82
TF
1709 struct ibmvnic_tx_pool *tx_pool;
1710 unsigned int tx_send_failed = 0;
0d973388 1711 netdev_tx_t ret = NETDEV_TX_OK;
032c5e82 1712 unsigned int tx_map_failed = 0;
c62aa373 1713 union sub_crq indir_arr[16];
032c5e82
TF
1714 unsigned int tx_dropped = 0;
1715 unsigned int tx_packets = 0;
1716 unsigned int tx_bytes = 0;
1717 dma_addr_t data_dma_addr;
1718 struct netdev_queue *txq;
032c5e82
TF
1719 unsigned long lpar_rc;
1720 union sub_crq tx_crq;
1721 unsigned int offset;
ad7775dc 1722 int num_entries = 1;
032c5e82 1723 unsigned char *dst;
032c5e82 1724 int index = 0;
a0dca10f 1725 u8 proto = 0;
0d973388
TF
1726
1727 tx_scrq = adapter->tx_scrq[queue_num];
1728 txq = netdev_get_tx_queue(netdev, queue_num);
1729 ind_bufp = &tx_scrq->ind_buf;
032c5e82 1730
7ed5b31f 1731 if (test_bit(0, &adapter->resetting)) {
7f5b0308
TF
1732 dev_kfree_skb_any(skb);
1733
032c5e82
TF
1734 tx_send_failed++;
1735 tx_dropped++;
7f5b0308 1736 ret = NETDEV_TX_OK;
032c5e82
TF
1737 goto out;
1738 }
1739
7083a45a 1740 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1f247a6f
TF
1741 tx_dropped++;
1742 tx_send_failed++;
1743 ret = NETDEV_TX_OK;
0d973388 1744 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1f247a6f
TF
1745 goto out;
1746 }
06b3e357
TF
1747 if (skb_is_gso(skb))
1748 tx_pool = &adapter->tso_pool[queue_num];
1749 else
1750 tx_pool = &adapter->tx_pool[queue_num];
1f247a6f 1751
032c5e82 1752 index = tx_pool->free_map[tx_pool->consumer_index];
fdb06105 1753
86b61a5f
TF
1754 if (index == IBMVNIC_INVALID_MAP) {
1755 dev_kfree_skb_any(skb);
1756 tx_send_failed++;
1757 tx_dropped++;
bb55362b 1758 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
86b61a5f
TF
1759 ret = NETDEV_TX_OK;
1760 goto out;
1761 }
1762
1763 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1764
06b3e357
TF
1765 offset = index * tx_pool->buf_size;
1766 dst = tx_pool->long_term_buff.buff + offset;
1767 memset(dst, 0, tx_pool->buf_size);
1768 data_dma_addr = tx_pool->long_term_buff.addr + offset;
032c5e82 1769
15482056
TF
1770 if (skb_shinfo(skb)->nr_frags) {
1771 int cur, i;
1772
1773 /* Copy the head */
1774 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1775 cur = skb_headlen(skb);
1776
1777 /* Copy the frags */
1778 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1779 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1780
c3105f84
CJ
1781 memcpy(dst + cur, skb_frag_address(frag),
1782 skb_frag_size(frag));
15482056
TF
1783 cur += skb_frag_size(frag);
1784 }
1785 } else {
1786 skb_copy_from_linear_data(skb, dst, skb->len);
1787 }
1788
42557dab
LP
1789 /* post changes to long_term_buff *dst before VIOS accessing it */
1790 dma_wmb();
1791
032c5e82 1792 tx_pool->consumer_index =
06b3e357 1793 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
032c5e82
TF
1794
1795 tx_buff = &tx_pool->tx_buff[index];
1796 tx_buff->skb = skb;
032c5e82
TF
1797 tx_buff->index = index;
1798 tx_buff->pool_index = queue_num;
032c5e82
TF
1799
1800 memset(&tx_crq, 0, sizeof(tx_crq));
1801 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1802 tx_crq.v1.type = IBMVNIC_TX_DESC;
1803 tx_crq.v1.n_crq_elem = 1;
1804 tx_crq.v1.n_sge = 1;
1805 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
06b3e357 1806
fdb06105 1807 if (skb_is_gso(skb))
06b3e357
TF
1808 tx_crq.v1.correlator =
1809 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
fdb06105 1810 else
06b3e357
TF
1811 tx_crq.v1.correlator = cpu_to_be32(index);
1812 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
032c5e82
TF
1813 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1814 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1815
e84b4794 1816 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
032c5e82
TF
1817 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1818 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1819 }
1820
1821 if (skb->protocol == htons(ETH_P_IP)) {
a0dca10f
TF
1822 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1823 proto = ip_hdr(skb)->protocol;
1824 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1825 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1826 proto = ipv6_hdr(skb)->nexthdr;
032c5e82
TF
1827 }
1828
a0dca10f
TF
1829 if (proto == IPPROTO_TCP)
1830 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1831 else if (proto == IPPROTO_UDP)
1832 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1833
ad7775dc 1834 if (skb->ip_summed == CHECKSUM_PARTIAL) {
032c5e82 1835 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
ad7775dc
TF
1836 hdrs += 2;
1837 }
fdb06105
TF
1838 if (skb_is_gso(skb)) {
1839 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1840 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1841 hdrs += 2;
1842 }
0d973388
TF
1843
1844 if ((*hdrs >> 7) & 1)
c62aa373 1845 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
7f5b0308 1846
0d973388
TF
1847 tx_crq.v1.n_crq_elem = num_entries;
1848 tx_buff->num_entries = num_entries;
1849 /* flush buffer if current entry can not fit */
1850 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
1851 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1852 if (lpar_rc != H_SUCCESS)
1853 goto tx_flush_err;
1854 }
7f5b0308 1855
c62aa373
TF
1856 indir_arr[0] = tx_crq;
1857 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
0d973388
TF
1858 num_entries * sizeof(struct ibmvnic_generic_scrq));
1859 ind_bufp->index += num_entries;
1860 if (__netdev_tx_sent_queue(txq, skb->len,
1861 netdev_xmit_more() &&
1862 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
1863 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1864 if (lpar_rc != H_SUCCESS)
1865 goto tx_err;
032c5e82 1866 }
142c0ac4 1867
ffc385b9 1868 if (atomic_add_return(num_entries, &tx_scrq->used)
58c8c0c0 1869 >= adapter->req_tx_entries_per_subcrq) {
0aecb13c 1870 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
142c0ac4
TF
1871 netif_stop_subqueue(netdev, queue_num);
1872 }
1873
032c5e82
TF
1874 tx_packets++;
1875 tx_bytes += skb->len;
1876 txq->trans_start = jiffies;
1877 ret = NETDEV_TX_OK;
86b61a5f 1878 goto out;
032c5e82 1879
0d973388
TF
1880tx_flush_err:
1881 dev_kfree_skb_any(skb);
1882 tx_buff->skb = NULL;
1883 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1884 tx_pool->num_buffers - 1 :
1885 tx_pool->consumer_index - 1;
1886 tx_dropped++;
1887tx_err:
1888 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1889 dev_err_ratelimited(dev, "tx: send failed\n");
1890
1891 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1892 /* Disable TX and report carrier off if queue is closed
1893 * or pending failover.
1894 * Firmware guarantees that a signal will be sent to the
1895 * driver, triggering a reset or some other action.
1896 */
1897 netif_tx_stop_all_queues(netdev);
1898 netif_carrier_off(netdev);
1899 }
032c5e82
TF
1900out:
1901 netdev->stats.tx_dropped += tx_dropped;
1902 netdev->stats.tx_bytes += tx_bytes;
1903 netdev->stats.tx_packets += tx_packets;
1904 adapter->tx_send_failed += tx_send_failed;
1905 adapter->tx_map_failed += tx_map_failed;
3d52b594
JA
1906 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1907 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1908 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
032c5e82
TF
1909
1910 return ret;
1911}
1912
1913static void ibmvnic_set_multi(struct net_device *netdev)
1914{
1915 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1916 struct netdev_hw_addr *ha;
1917 union ibmvnic_crq crq;
1918
1919 memset(&crq, 0, sizeof(crq));
1920 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1921 crq.request_capability.cmd = REQUEST_CAPABILITY;
1922
1923 if (netdev->flags & IFF_PROMISC) {
1924 if (!adapter->promisc_supported)
1925 return;
1926 } else {
1927 if (netdev->flags & IFF_ALLMULTI) {
1928 /* Accept all multicast */
1929 memset(&crq, 0, sizeof(crq));
1930 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1931 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1932 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1933 ibmvnic_send_crq(adapter, &crq);
1934 } else if (netdev_mc_empty(netdev)) {
1935 /* Reject all multicast */
1936 memset(&crq, 0, sizeof(crq));
1937 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1938 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1939 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1940 ibmvnic_send_crq(adapter, &crq);
1941 } else {
1942 /* Accept one or more multicast(s) */
1943 netdev_for_each_mc_addr(ha, netdev) {
1944 memset(&crq, 0, sizeof(crq));
1945 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1946 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1947 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1948 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1949 ha->addr);
1950 ibmvnic_send_crq(adapter, &crq);
1951 }
1952 }
1953 }
1954}
1955
62740e97 1956static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
032c5e82
TF
1957{
1958 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
032c5e82 1959 union ibmvnic_crq crq;
9c4eaabd 1960 int rc;
032c5e82 1961
62740e97
TF
1962 if (!is_valid_ether_addr(dev_addr)) {
1963 rc = -EADDRNOTAVAIL;
1964 goto err;
1965 }
032c5e82
TF
1966
1967 memset(&crq, 0, sizeof(crq));
1968 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1969 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
62740e97 1970 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
f813614f 1971
ff25dcb9
TF
1972 mutex_lock(&adapter->fw_lock);
1973 adapter->fw_done_rc = 0;
070eca95 1974 reinit_completion(&adapter->fw_done);
ff25dcb9 1975
9c4eaabd 1976 rc = ibmvnic_send_crq(adapter, &crq);
62740e97
TF
1977 if (rc) {
1978 rc = -EIO;
ff25dcb9 1979 mutex_unlock(&adapter->fw_lock);
62740e97
TF
1980 goto err;
1981 }
1982
476d96ca 1983 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
032c5e82 1984 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
476d96ca 1985 if (rc || adapter->fw_done_rc) {
62740e97 1986 rc = -EIO;
ff25dcb9 1987 mutex_unlock(&adapter->fw_lock);
62740e97
TF
1988 goto err;
1989 }
ff25dcb9 1990 mutex_unlock(&adapter->fw_lock);
62740e97
TF
1991 return 0;
1992err:
1993 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1994 return rc;
032c5e82
TF
1995}
1996
c26eba03
JA
1997static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1998{
1999 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2000 struct sockaddr *addr = p;
f813614f 2001 int rc;
c26eba03 2002
62740e97 2003 rc = 0;
8fc3672a
LP
2004 if (!is_valid_ether_addr(addr->sa_data))
2005 return -EADDRNOTAVAIL;
2006
67eb2114
JW
2007 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2008 if (adapter->state != VNIC_PROBED)
62740e97 2009 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
c26eba03 2010
f813614f 2011 return rc;
c26eba03
JA
2012}
2013
caee7bf5
LP
2014static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2015{
2016 switch (reason) {
2017 case VNIC_RESET_FAILOVER:
2018 return "FAILOVER";
2019 case VNIC_RESET_MOBILITY:
2020 return "MOBILITY";
2021 case VNIC_RESET_FATAL:
2022 return "FATAL";
2023 case VNIC_RESET_NON_FATAL:
2024 return "NON_FATAL";
2025 case VNIC_RESET_TIMEOUT:
2026 return "TIMEOUT";
2027 case VNIC_RESET_CHANGE_PARAM:
2028 return "CHANGE_PARAM";
822ebc2c
LP
2029 case VNIC_RESET_PASSIVE_INIT:
2030 return "PASSIVE_INIT";
caee7bf5 2031 }
07b5dc1d 2032 return "UNKNOWN";
caee7bf5
LP
2033}
2034
80708602 2035/*
ed651a10
NF
2036 * do_reset returns zero if we are able to keep processing reset events, or
2037 * non-zero if we hit a fatal error and must halt.
2038 */
2039static int do_reset(struct ibmvnic_adapter *adapter,
2040 struct ibmvnic_rwi *rwi, u32 reset_state)
032c5e82 2041{
896d8695 2042 u64 old_num_rx_queues, old_num_tx_queues;
5bf032ef 2043 u64 old_num_rx_slots, old_num_tx_slots;
ed651a10 2044 struct net_device *netdev = adapter->netdev;
d3a6abcc 2045 int rc;
ed651a10 2046
38bd5cec 2047 netdev_dbg(adapter->netdev,
0666ef7f
LP
2048 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2049 adapter_state_to_string(adapter->state),
2050 adapter->failover_pending,
2051 reset_reason_to_string(rwi->reset_reason),
2052 adapter_state_to_string(reset_state));
d1cf33d9 2053
3f5ec374
LP
2054 adapter->reset_reason = rwi->reset_reason;
2055 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2056 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2057 rtnl_lock();
2058
bab08bed 2059 /* Now that we have the rtnl lock, clear any pending failover.
1d850493
SB
2060 * This will ensure ibmvnic_open() has either completed or will
2061 * block until failover is complete.
2062 */
2063 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2064 adapter->failover_pending = false;
b27507bb 2065
8f1c0fd2
SB
2066 /* read the state and check (again) after getting rtnl */
2067 reset_state = adapter->state;
2068
2069 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2070 rc = -EBUSY;
2071 goto out;
2072 }
2073
ed651a10 2074 netif_carrier_off(netdev);
ed651a10 2075
896d8695
JA
2076 old_num_rx_queues = adapter->req_rx_queues;
2077 old_num_tx_queues = adapter->req_tx_queues;
5bf032ef
TF
2078 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2079 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
896d8695 2080
30f79625
NF
2081 ibmvnic_cleanup(netdev);
2082
1f94608b
TF
2083 if (reset_state == VNIC_OPEN &&
2084 adapter->reset_reason != VNIC_RESET_MOBILITY &&
30f79625 2085 adapter->reset_reason != VNIC_RESET_FAILOVER) {
3f5ec374
LP
2086 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2087 rc = __ibmvnic_close(netdev);
2088 if (rc)
2089 goto out;
2090 } else {
2091 adapter->state = VNIC_CLOSING;
b27507bb 2092
3f5ec374
LP
2093 /* Release the RTNL lock before link state change and
2094 * re-acquire after the link state change to allow
2095 * linkwatch_event to grab the RTNL lock and run during
2096 * a reset.
2097 */
2098 rtnl_unlock();
2099 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2100 rtnl_lock();
2101 if (rc)
2102 goto out;
ed651a10 2103
8f1c0fd2
SB
2104 if (adapter->state == VNIC_OPEN) {
2105 /* When we dropped rtnl, ibmvnic_open() got
2106 * it and noticed that we are resetting and
2107 * set the adapter state to OPEN. Update our
2108 * new "target" state, and resume the reset
2109 * from VNIC_CLOSING state.
2110 */
2111 netdev_dbg(netdev,
0666ef7f
LP
2112 "Open changed state from %s, updating.\n",
2113 adapter_state_to_string(reset_state));
8f1c0fd2
SB
2114 reset_state = VNIC_OPEN;
2115 adapter->state = VNIC_CLOSING;
2116 }
2117
3f5ec374 2118 if (adapter->state != VNIC_CLOSING) {
8f1c0fd2
SB
2119 /* If someone else changed the adapter state
2120 * when we dropped the rtnl, fail the reset
2121 */
3f5ec374
LP
2122 rc = -1;
2123 goto out;
2124 }
3f5ec374 2125 adapter->state = VNIC_CLOSED;
b27507bb 2126 }
3f5ec374 2127 }
b27507bb 2128
3f5ec374
LP
2129 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2130 release_resources(adapter);
2131 release_sub_crqs(adapter, 1);
2132 release_crq_queue(adapter);
c26eba03
JA
2133 }
2134
8cb31cfc
JA
2135 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2136 /* remove the closed state so when we call open it appears
2137 * we are coming from the probed state.
2138 */
2139 adapter->state = VNIC_PROBED;
032c5e82 2140
3f5ec374
LP
2141 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2142 rc = init_crq_queue(adapter);
2143 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
30f79625
NF
2144 rc = ibmvnic_reenable_crq_queue(adapter);
2145 release_sub_crqs(adapter, 1);
2146 } else {
2147 rc = ibmvnic_reset_crq(adapter);
8b40eb73 2148 if (rc == H_CLOSED || rc == H_SUCCESS) {
30f79625 2149 rc = vio_enable_interrupts(adapter->vdev);
8b40eb73
DM
2150 if (rc)
2151 netdev_err(adapter->netdev,
2152 "Reset failed to enable interrupts. rc=%d\n",
2153 rc);
2154 }
30f79625
NF
2155 }
2156
2157 if (rc) {
2158 netdev_err(adapter->netdev,
8b40eb73 2159 "Reset couldn't initialize crq. rc=%d\n", rc);
b27507bb 2160 goto out;
30f79625
NF
2161 }
2162
635e442f 2163 rc = ibmvnic_reset_init(adapter, true);
b27507bb
JK
2164 if (rc) {
2165 rc = IBMVNIC_INIT_FAILED;
2166 goto out;
2167 }
ed651a10 2168
53f8b1b2 2169 /* If the adapter was in PROBE or DOWN state prior to the reset,
8cb31cfc
JA
2170 * exit here.
2171 */
53f8b1b2 2172 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
b27507bb
JK
2173 rc = 0;
2174 goto out;
2175 }
ed651a10 2176
8cb31cfc 2177 rc = ibmvnic_login(netdev);
f78afaac 2178 if (rc)
b27507bb 2179 goto out;
ed651a10 2180
3f5ec374
LP
2181 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2182 rc = init_resources(adapter);
2183 if (rc)
2184 goto out;
2185 } else if (adapter->req_rx_queues != old_num_rx_queues ||
b27507bb
JK
2186 adapter->req_tx_queues != old_num_tx_queues ||
2187 adapter->req_rx_add_entries_per_subcrq !=
2188 old_num_rx_slots ||
2189 adapter->req_tx_entries_per_subcrq !=
9f134573
MC
2190 old_num_tx_slots ||
2191 !adapter->rx_pool ||
2192 !adapter->tso_pool ||
2193 !adapter->tx_pool) {
896d8695
JA
2194 release_rx_pools(adapter);
2195 release_tx_pools(adapter);
86f669b2 2196 release_napi(adapter);
a5681e20
JK
2197 release_vpd_data(adapter);
2198
2199 rc = init_resources(adapter);
f611a5b4 2200 if (rc)
b27507bb 2201 goto out;
a5681e20 2202
c26eba03
JA
2203 } else {
2204 rc = reset_tx_pools(adapter);
8ae4dff8 2205 if (rc) {
9f134573 2206 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
91dc5d25 2207 rc);
b27507bb 2208 goto out;
8ae4dff8 2209 }
8c0543ad 2210
c26eba03 2211 rc = reset_rx_pools(adapter);
8ae4dff8 2212 if (rc) {
9f134573 2213 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
91dc5d25 2214 rc);
b27507bb 2215 goto out;
8ae4dff8 2216 }
c26eba03 2217 }
134bbe7f 2218 ibmvnic_disable_irqs(adapter);
8cb31cfc 2219 }
e676d81c
JA
2220 adapter->state = VNIC_CLOSED;
2221
b27507bb
JK
2222 if (reset_state == VNIC_CLOSED) {
2223 rc = 0;
2224 goto out;
2225 }
e676d81c 2226
ed651a10
NF
2227 rc = __ibmvnic_open(netdev);
2228 if (rc) {
b27507bb
JK
2229 rc = IBMVNIC_OPEN_FAILED;
2230 goto out;
ed651a10
NF
2231 }
2232
be32a243
TF
2233 /* refresh device's multicast list */
2234 ibmvnic_set_multi(netdev);
2235
98025bce 2236 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
6be46662
LP
2237 adapter->reset_reason == VNIC_RESET_MOBILITY)
2238 __netdev_notify_peers(netdev);
61d3e1d9 2239
b27507bb
JK
2240 rc = 0;
2241
2242out:
0cb4bc66
DM
2243 /* restore the adapter state if reset failed */
2244 if (rc)
2245 adapter->state = reset_state;
3f5ec374
LP
2246 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2247 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2248 rtnl_unlock();
b27507bb 2249
0666ef7f
LP
2250 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2251 adapter_state_to_string(adapter->state),
2252 adapter->failover_pending, rc);
b27507bb 2253 return rc;
ed651a10
NF
2254}
2255
2770a798
TF
2256static int do_hard_reset(struct ibmvnic_adapter *adapter,
2257 struct ibmvnic_rwi *rwi, u32 reset_state)
2258{
2259 struct net_device *netdev = adapter->netdev;
2260 int rc;
2261
caee7bf5
LP
2262 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2263 reset_reason_to_string(rwi->reset_reason));
2770a798 2264
8f1c0fd2
SB
2265 /* read the state and check (again) after getting rtnl */
2266 reset_state = adapter->state;
2267
2268 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2269 rc = -EBUSY;
2270 goto out;
2271 }
2272
2770a798
TF
2273 netif_carrier_off(netdev);
2274 adapter->reset_reason = rwi->reset_reason;
2275
2276 ibmvnic_cleanup(netdev);
2277 release_resources(adapter);
2278 release_sub_crqs(adapter, 0);
2279 release_crq_queue(adapter);
2280
2281 /* remove the closed state so when we call open it appears
2282 * we are coming from the probed state.
2283 */
2284 adapter->state = VNIC_PROBED;
2285
bbd669a8 2286 reinit_completion(&adapter->init_done);
2770a798
TF
2287 rc = init_crq_queue(adapter);
2288 if (rc) {
2289 netdev_err(adapter->netdev,
2290 "Couldn't initialize crq. rc=%d\n", rc);
0cb4bc66 2291 goto out;
2770a798
TF
2292 }
2293
635e442f 2294 rc = ibmvnic_reset_init(adapter, false);
2770a798 2295 if (rc)
0cb4bc66 2296 goto out;
2770a798 2297
53f8b1b2 2298 /* If the adapter was in PROBE or DOWN state prior to the reset,
2770a798
TF
2299 * exit here.
2300 */
53f8b1b2 2301 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
0cb4bc66 2302 goto out;
2770a798
TF
2303
2304 rc = ibmvnic_login(netdev);
0cb4bc66
DM
2305 if (rc)
2306 goto out;
a5681e20
JK
2307
2308 rc = init_resources(adapter);
2770a798 2309 if (rc)
0cb4bc66 2310 goto out;
2770a798
TF
2311
2312 ibmvnic_disable_irqs(adapter);
2313 adapter->state = VNIC_CLOSED;
2314
2315 if (reset_state == VNIC_CLOSED)
0cb4bc66 2316 goto out;
2770a798
TF
2317
2318 rc = __ibmvnic_open(netdev);
0cb4bc66
DM
2319 if (rc) {
2320 rc = IBMVNIC_OPEN_FAILED;
2321 goto out;
2322 }
2770a798 2323
6be46662 2324 __netdev_notify_peers(netdev);
0cb4bc66
DM
2325out:
2326 /* restore adapter state if reset failed */
2327 if (rc)
2328 adapter->state = reset_state;
0666ef7f
LP
2329 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2330 adapter_state_to_string(adapter->state),
2331 adapter->failover_pending, rc);
0cb4bc66 2332 return rc;
2770a798
TF
2333}
2334
ed651a10
NF
2335static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2336{
2337 struct ibmvnic_rwi *rwi;
6c5c7489 2338 unsigned long flags;
ed651a10 2339
6c5c7489 2340 spin_lock_irqsave(&adapter->rwi_lock, flags);
ed651a10
NF
2341
2342 if (!list_empty(&adapter->rwi_list)) {
2343 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2344 list);
2345 list_del(&rwi->list);
2346 } else {
2347 rwi = NULL;
2348 }
2349
6c5c7489 2350 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ed651a10
NF
2351 return rwi;
2352}
2353
53f8b1b2
CF
2354/**
2355 * do_passive_init - complete probing when partner device is detected.
2356 * @adapter: ibmvnic_adapter struct
2357 *
2358 * If the ibmvnic device does not have a partner device to communicate with at boot
2359 * and that partner device comes online at a later time, this function is called
2360 * to complete the initialization process of ibmvnic device.
2361 * Caller is expected to hold rtnl_lock().
2362 *
2363 * Returns non-zero if sub-CRQs are not initialized properly leaving the device
2364 * in the down state.
2365 * Returns 0 upon success and the device is in PROBED state.
2366 */
2367
2368static int do_passive_init(struct ibmvnic_adapter *adapter)
2369{
2370 unsigned long timeout = msecs_to_jiffies(30000);
2371 struct net_device *netdev = adapter->netdev;
2372 struct device *dev = &adapter->vdev->dev;
2373 int rc;
2374
2375 netdev_dbg(netdev, "Partner device found, probing.\n");
2376
2377 adapter->state = VNIC_PROBING;
2378 reinit_completion(&adapter->init_done);
2379 adapter->init_done_rc = 0;
2380 adapter->crq.active = true;
2381
2382 rc = send_crq_init_complete(adapter);
2383 if (rc)
2384 goto out;
2385
2386 rc = send_version_xchg(adapter);
2387 if (rc)
2388 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2389
2390 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2391 dev_err(dev, "Initialization sequence timed out\n");
2392 rc = -ETIMEDOUT;
2393 goto out;
2394 }
2395
2396 rc = init_sub_crqs(adapter);
2397 if (rc) {
2398 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2399 goto out;
2400 }
2401
2402 rc = init_sub_crq_irqs(adapter);
2403 if (rc) {
2404 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
2405 goto init_failed;
2406 }
2407
2408 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2409 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2410 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2411
2412 adapter->state = VNIC_PROBED;
2413 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2414
2415 return 0;
2416
2417init_failed:
2418 release_sub_crqs(adapter, 1);
2419out:
2420 adapter->state = VNIC_DOWN;
2421 return rc;
2422}
2423
ed651a10
NF
2424static void __ibmvnic_reset(struct work_struct *work)
2425{
ed651a10 2426 struct ibmvnic_adapter *adapter;
7d7195a0 2427 bool saved_state = false;
4f408e1f
SB
2428 struct ibmvnic_rwi *tmprwi;
2429 struct ibmvnic_rwi *rwi;
7d7195a0 2430 unsigned long flags;
ed651a10 2431 u32 reset_state;
849ea910 2432 int num_fails = 0;
c26eba03 2433 int rc = 0;
ed651a10
NF
2434
2435 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
ed651a10 2436
7ed5b31f 2437 if (test_and_set_bit_lock(0, &adapter->resetting)) {
870e04ae
LP
2438 queue_delayed_work(system_long_wq,
2439 &adapter->ibmvnic_delayed_reset,
2440 IBMVNIC_RESET_DELAY);
7ed5b31f
JK
2441 return;
2442 }
2443
ed651a10
NF
2444 rwi = get_next_rwi(adapter);
2445 while (rwi) {
7d7195a0
JK
2446 spin_lock_irqsave(&adapter->state_lock, flags);
2447
36f1031c 2448 if (adapter->state == VNIC_REMOVING ||
c8dc5595 2449 adapter->state == VNIC_REMOVED) {
7d7195a0 2450 spin_unlock_irqrestore(&adapter->state_lock, flags);
1c2977c0
JK
2451 kfree(rwi);
2452 rc = EBUSY;
2453 break;
2454 }
36f1031c 2455
7d7195a0
JK
2456 if (!saved_state) {
2457 reset_state = adapter->state;
7d7195a0
JK
2458 saved_state = true;
2459 }
2460 spin_unlock_irqrestore(&adapter->state_lock, flags);
2461
53f8b1b2
CF
2462 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2463 rtnl_lock();
2464 rc = do_passive_init(adapter);
2465 rtnl_unlock();
2466 if (!rc)
2467 netif_carrier_on(adapter->netdev);
2468 } else if (adapter->force_reset_recovery) {
bab08bed 2469 /* Since we are doing a hard reset now, clear the
1d850493
SB
2470 * failover_pending flag so we don't ignore any
2471 * future MOBILITY or other resets.
2472 */
2473 adapter->failover_pending = false;
2474
b27507bb
JK
2475 /* Transport event occurred during previous reset */
2476 if (adapter->wait_for_reset) {
2477 /* Previous was CHANGE_PARAM; caller locked */
2478 adapter->force_reset_recovery = false;
2479 rc = do_hard_reset(adapter, rwi, reset_state);
2480 } else {
2481 rtnl_lock();
2482 adapter->force_reset_recovery = false;
2483 rc = do_hard_reset(adapter, rwi, reset_state);
2484 rtnl_unlock();
2485 }
849ea910
SB
2486 if (rc)
2487 num_fails++;
2488 else
2489 num_fails = 0;
2490
2491 /* If auto-priority-failover is enabled we can get
2492 * back to back failovers during resets, resulting
2493 * in at least two failed resets (from high-priority
2494 * backing device to low-priority one and then back)
2495 * If resets continue to fail beyond that, give the
2496 * adapter some time to settle down before retrying.
2497 */
2498 if (num_fails >= 3) {
f15fde9d 2499 netdev_dbg(adapter->netdev,
849ea910
SB
2500 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2501 adapter_state_to_string(adapter->state),
2502 num_fails);
f15fde9d
SB
2503 set_current_state(TASK_UNINTERRUPTIBLE);
2504 schedule_timeout(60 * HZ);
2505 }
1f45dc22 2506 } else {
2770a798
TF
2507 rc = do_reset(adapter, rwi, reset_state);
2508 }
4f408e1f 2509 tmprwi = rwi;
a86d5c68 2510 adapter->last_reset_time = jiffies;
0cb4bc66 2511
18f141bf
DM
2512 if (rc)
2513 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
ed651a10
NF
2514
2515 rwi = get_next_rwi(adapter);
7ed5b31f 2516
4f408e1f
SB
2517 /*
2518 * If there is another reset queued, free the previous rwi
2519 * and process the new reset even if previous reset failed
2520 * (the previous reset could have failed because of a fail
2521 * over for instance, so process the fail over).
2522 *
2523 * If there are no resets queued and the previous reset failed,
2524 * the adapter would be in an undefined state. So retry the
2525 * previous reset as a hard reset.
2526 */
2527 if (rwi)
2528 kfree(tmprwi);
2529 else if (rc)
2530 rwi = tmprwi;
2531
7ed5b31f 2532 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
4f408e1f 2533 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
7ed5b31f 2534 adapter->force_reset_recovery = true;
ed651a10
NF
2535 }
2536
c26eba03 2537 if (adapter->wait_for_reset) {
c26eba03
JA
2538 adapter->reset_done_rc = rc;
2539 complete(&adapter->reset_done);
2540 }
2541
7ed5b31f 2542 clear_bit_unlock(0, &adapter->resetting);
38bd5cec
SB
2543
2544 netdev_dbg(adapter->netdev,
0666ef7f
LP
2545 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2546 adapter_state_to_string(adapter->state),
2547 adapter->force_reset_recovery,
38bd5cec 2548 adapter->wait_for_reset);
7ed5b31f
JK
2549}
2550
2551static void __ibmvnic_delayed_reset(struct work_struct *work)
2552{
2553 struct ibmvnic_adapter *adapter;
2554
2555 adapter = container_of(work, struct ibmvnic_adapter,
2556 ibmvnic_delayed_reset.work);
2557 __ibmvnic_reset(&adapter->ibmvnic_reset);
ed651a10
NF
2558}
2559
35a1c06a
SB
2560static void flush_reset_queue(struct ibmvnic_adapter *adapter)
2561{
2562 struct list_head *entry, *tmp_entry;
2563
2564 if (!list_empty(&adapter->rwi_list)) {
2565 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
2566 list_del(entry);
2567 kfree(list_entry(entry, struct ibmvnic_rwi, list));
2568 }
2569 }
2570}
2571
af894d23
TF
2572static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2573 enum ibmvnic_reset_reason reason)
ed651a10 2574{
ed651a10 2575 struct net_device *netdev = adapter->netdev;
35a1c06a 2576 struct ibmvnic_rwi *rwi, *tmp;
6c5c7489 2577 unsigned long flags;
af894d23 2578 int ret;
ed651a10 2579
b646acd5
JK
2580 spin_lock_irqsave(&adapter->rwi_lock, flags);
2581
2582 /* If failover is pending don't schedule any other reset.
1d850493
SB
2583 * Instead let the failover complete. If there is already a
2584 * a failover reset scheduled, we will detect and drop the
2585 * duplicate reset when walking the ->rwi_list below.
2586 */
ed651a10 2587 if (adapter->state == VNIC_REMOVING ||
5a18e1e0 2588 adapter->state == VNIC_REMOVED ||
1d850493 2589 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
af894d23 2590 ret = EBUSY;
5a18e1e0 2591 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
af894d23 2592 goto err;
ed651a10
NF
2593 }
2594
6a2fb0e9
NF
2595 if (adapter->state == VNIC_PROBING) {
2596 netdev_warn(netdev, "Adapter reset during probe\n");
08d6b7a7 2597 adapter->init_done_rc = -EAGAIN;
91dc5d25 2598 ret = EAGAIN;
af894d23 2599 goto err;
6a2fb0e9
NF
2600 }
2601
3e98ae00 2602 list_for_each_entry(tmp, &adapter->rwi_list, list) {
ed651a10 2603 if (tmp->reset_reason == reason) {
caee7bf5
LP
2604 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2605 reset_reason_to_string(reason));
af894d23
TF
2606 ret = EBUSY;
2607 goto err;
ed651a10
NF
2608 }
2609 }
2610
1d1bbc37 2611 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
ed651a10 2612 if (!rwi) {
af894d23
TF
2613 ret = ENOMEM;
2614 goto err;
ed651a10 2615 }
2770a798
TF
2616 /* if we just received a transport event,
2617 * flush reset queue and process this reset
2618 */
35a1c06a
SB
2619 if (adapter->force_reset_recovery)
2620 flush_reset_queue(adapter);
2621
ed651a10
NF
2622 rwi->reset_reason = reason;
2623 list_add_tail(&rwi->list, &adapter->rwi_list);
caee7bf5
LP
2624 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2625 reset_reason_to_string(reason));
870e04ae 2626 queue_work(system_long_wq, &adapter->ibmvnic_reset);
af894d23 2627
4a41c421 2628 ret = 0;
af894d23 2629err:
4a41c421
SB
2630 /* ibmvnic_close() below can block, so drop the lock first */
2631 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2632
2633 if (ret == ENOMEM)
2634 ibmvnic_close(netdev);
2635
af894d23 2636 return -ret;
ed651a10
NF
2637}
2638
0290bd29 2639static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
ed651a10
NF
2640{
2641 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2642
855a631a
LP
2643 if (test_bit(0, &adapter->resetting)) {
2644 netdev_err(adapter->netdev,
2645 "Adapter is resetting, skip timeout reset\n");
2646 return;
2647 }
a86d5c68
DM
2648 /* No queuing up reset until at least 5 seconds (default watchdog val)
2649 * after last reset
2650 */
2651 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2652 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2653 return;
2654 }
ed651a10 2655 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
032c5e82
TF
2656}
2657
2658static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2659 struct ibmvnic_rx_buff *rx_buff)
2660{
2661 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2662
2663 rx_buff->skb = NULL;
2664
2665 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2666 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2667
2668 atomic_dec(&pool->available);
2669}
2670
2671static int ibmvnic_poll(struct napi_struct *napi, int budget)
2672{
ec20f36b
DB
2673 struct ibmvnic_sub_crq_queue *rx_scrq;
2674 struct ibmvnic_adapter *adapter;
2675 struct net_device *netdev;
2676 int frames_processed;
2677 int scrq_num;
2678
2679 netdev = napi->dev;
2680 adapter = netdev_priv(netdev);
2681 scrq_num = (int)(napi - adapter->napi);
2682 frames_processed = 0;
2683 rx_scrq = adapter->rx_scrq[scrq_num];
152ce47d 2684
032c5e82
TF
2685restart_poll:
2686 while (frames_processed < budget) {
2687 struct sk_buff *skb;
2688 struct ibmvnic_rx_buff *rx_buff;
2689 union sub_crq *next;
2690 u32 length;
2691 u16 offset;
2692 u8 flags = 0;
2693
7ed5b31f 2694 if (unlikely(test_bit(0, &adapter->resetting) &&
3468656f 2695 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
ec20f36b 2696 enable_scrq_irq(adapter, rx_scrq);
21ecba6c
TF
2697 napi_complete_done(napi, frames_processed);
2698 return frames_processed;
2699 }
2700
ec20f36b 2701 if (!pending_scrq(adapter, rx_scrq))
032c5e82 2702 break;
ec20f36b 2703 next = ibmvnic_next_scrq(adapter, rx_scrq);
914789ac
LP
2704 rx_buff = (struct ibmvnic_rx_buff *)
2705 be64_to_cpu(next->rx_comp.correlator);
032c5e82
TF
2706 /* do error checking */
2707 if (next->rx_comp.rc) {
e1cea2e7
JA
2708 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2709 be16_to_cpu(next->rx_comp.rc));
032c5e82
TF
2710 /* free the entry */
2711 next->rx_comp.first = 0;
4b9b0f01 2712 dev_kfree_skb_any(rx_buff->skb);
032c5e82 2713 remove_buff_from_pool(adapter, rx_buff);
ca05e316 2714 continue;
abe27a88
TF
2715 } else if (!rx_buff->skb) {
2716 /* free the entry */
2717 next->rx_comp.first = 0;
2718 remove_buff_from_pool(adapter, rx_buff);
2719 continue;
032c5e82
TF
2720 }
2721
2722 length = be32_to_cpu(next->rx_comp.len);
2723 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2724 flags = next->rx_comp.flags;
2725 skb = rx_buff->skb;
42557dab
LP
2726 /* load long_term_buff before copying to skb */
2727 dma_rmb();
032c5e82
TF
2728 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2729 length);
6052d5e2
MFV
2730
2731 /* VLAN Header has been stripped by the system firmware and
2732 * needs to be inserted by the driver
2733 */
2734 if (adapter->rx_vlan_header_insertion &&
2735 (flags & IBMVNIC_VLAN_STRIPPED))
2736 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2737 ntohs(next->rx_comp.vlan_tci));
2738
032c5e82
TF
2739 /* free the entry */
2740 next->rx_comp.first = 0;
2741 remove_buff_from_pool(adapter, rx_buff);
2742
2743 skb_put(skb, length);
2744 skb->protocol = eth_type_trans(skb, netdev);
94ca305f 2745 skb_record_rx_queue(skb, scrq_num);
032c5e82
TF
2746
2747 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2748 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2749 skb->ip_summed = CHECKSUM_UNNECESSARY;
2750 }
2751
2752 length = skb->len;
2753 napi_gro_receive(napi, skb); /* send it up */
2754 netdev->stats.rx_packets++;
2755 netdev->stats.rx_bytes += length;
3d52b594
JA
2756 adapter->rx_stats_buffers[scrq_num].packets++;
2757 adapter->rx_stats_buffers[scrq_num].bytes += length;
032c5e82
TF
2758 frames_processed++;
2759 }
152ce47d 2760
41ed0a00
DB
2761 if (adapter->state != VNIC_CLOSING &&
2762 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2763 adapter->req_rx_add_entries_per_subcrq / 2) ||
2764 frames_processed < budget))
152ce47d 2765 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
032c5e82 2766 if (frames_processed < budget) {
ec20f36b
DB
2767 if (napi_complete_done(napi, frames_processed)) {
2768 enable_scrq_irq(adapter, rx_scrq);
2769 if (pending_scrq(adapter, rx_scrq)) {
ec20f36b
DB
2770 if (napi_reschedule(napi)) {
2771 disable_scrq_irq(adapter, rx_scrq);
2772 goto restart_poll;
2773 }
2774 }
032c5e82
TF
2775 }
2776 }
2777 return frames_processed;
2778}
2779
c26eba03
JA
2780static int wait_for_reset(struct ibmvnic_adapter *adapter)
2781{
af894d23
TF
2782 int rc, ret;
2783
c26eba03
JA
2784 adapter->fallback.mtu = adapter->req_mtu;
2785 adapter->fallback.rx_queues = adapter->req_rx_queues;
2786 adapter->fallback.tx_queues = adapter->req_tx_queues;
2787 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2788 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2789
070eca95 2790 reinit_completion(&adapter->reset_done);
c26eba03 2791 adapter->wait_for_reset = true;
af894d23 2792 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
476d96ca
TF
2793
2794 if (rc) {
2795 ret = rc;
2796 goto out;
2797 }
2798 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2799 if (rc) {
2800 ret = -ENODEV;
2801 goto out;
2802 }
c26eba03 2803
af894d23 2804 ret = 0;
c26eba03 2805 if (adapter->reset_done_rc) {
af894d23 2806 ret = -EIO;
c26eba03
JA
2807 adapter->desired.mtu = adapter->fallback.mtu;
2808 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2809 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2810 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2811 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2812
070eca95 2813 reinit_completion(&adapter->reset_done);
af894d23
TF
2814 adapter->wait_for_reset = true;
2815 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
476d96ca
TF
2816 if (rc) {
2817 ret = rc;
2818 goto out;
2819 }
2820 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2821 60000);
2822 if (rc) {
2823 ret = -ENODEV;
2824 goto out;
2825 }
c26eba03 2826 }
476d96ca 2827out:
c26eba03
JA
2828 adapter->wait_for_reset = false;
2829
af894d23 2830 return ret;
c26eba03
JA
2831}
2832
3a807b75
JA
2833static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2834{
c26eba03
JA
2835 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2836
2837 adapter->desired.mtu = new_mtu + ETH_HLEN;
2838
2839 return wait_for_reset(adapter);
3a807b75
JA
2840}
2841
f10b09ef
TF
2842static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2843 struct net_device *dev,
2844 netdev_features_t features)
2845{
2846 /* Some backing hardware adapters can not
2847 * handle packets with a MSS less than 224
2848 * or with only one segment.
2849 */
2850 if (skb_is_gso(skb)) {
2851 if (skb_shinfo(skb)->gso_size < 224 ||
2852 skb_shinfo(skb)->gso_segs == 1)
2853 features &= ~NETIF_F_GSO_MASK;
2854 }
2855
2856 return features;
2857}
2858
032c5e82
TF
2859static const struct net_device_ops ibmvnic_netdev_ops = {
2860 .ndo_open = ibmvnic_open,
2861 .ndo_stop = ibmvnic_close,
2862 .ndo_start_xmit = ibmvnic_xmit,
2863 .ndo_set_rx_mode = ibmvnic_set_multi,
2864 .ndo_set_mac_address = ibmvnic_set_mac,
2865 .ndo_validate_addr = eth_validate_addr,
032c5e82 2866 .ndo_tx_timeout = ibmvnic_tx_timeout,
3a807b75 2867 .ndo_change_mtu = ibmvnic_change_mtu,
f10b09ef 2868 .ndo_features_check = ibmvnic_features_check,
032c5e82
TF
2869};
2870
2871/* ethtool functions */
2872
8a43379f
PR
2873static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2874 struct ethtool_link_ksettings *cmd)
032c5e82 2875{
f8d6ae0d
MFV
2876 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2877 int rc;
2878
2879 rc = send_query_phys_parms(adapter);
2880 if (rc) {
2881 adapter->speed = SPEED_UNKNOWN;
2882 adapter->duplex = DUPLEX_UNKNOWN;
2883 }
2884 cmd->base.speed = adapter->speed;
2885 cmd->base.duplex = adapter->duplex;
8a43379f
PR
2886 cmd->base.port = PORT_FIBRE;
2887 cmd->base.phy_address = 0;
2888 cmd->base.autoneg = AUTONEG_ENABLE;
2889
032c5e82
TF
2890 return 0;
2891}
2892
4e6759be 2893static void ibmvnic_get_drvinfo(struct net_device *netdev,
032c5e82
TF
2894 struct ethtool_drvinfo *info)
2895{
4e6759be
DANR
2896 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2897
8a96c80e
LP
2898 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2899 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2900 strscpy(info->fw_version, adapter->fw_version,
4e6759be 2901 sizeof(info->fw_version));
032c5e82
TF
2902}
2903
2904static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2905{
2906 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2907
2908 return adapter->msg_enable;
2909}
2910
2911static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2912{
2913 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2914
2915 adapter->msg_enable = data;
2916}
2917
2918static u32 ibmvnic_get_link(struct net_device *netdev)
2919{
2920 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2921
2922 /* Don't need to send a query because we request a logical link up at
2923 * init and then we wait for link state indications
2924 */
2925 return adapter->logical_link_state;
2926}
2927
2928static void ibmvnic_get_ringparam(struct net_device *netdev,
2929 struct ethtool_ringparam *ring)
2930{
bc131b3a
JA
2931 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2932
723ad916
TF
2933 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2934 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2935 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2936 } else {
2937 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2938 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2939 }
032c5e82
TF
2940 ring->rx_mini_max_pending = 0;
2941 ring->rx_jumbo_max_pending = 0;
bc131b3a
JA
2942 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2943 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
032c5e82
TF
2944 ring->rx_mini_pending = 0;
2945 ring->rx_jumbo_pending = 0;
2946}
2947
c26eba03
JA
2948static int ibmvnic_set_ringparam(struct net_device *netdev,
2949 struct ethtool_ringparam *ring)
2950{
2951 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
723ad916 2952 int ret;
c26eba03 2953
723ad916 2954 ret = 0;
c26eba03
JA
2955 adapter->desired.rx_entries = ring->rx_pending;
2956 adapter->desired.tx_entries = ring->tx_pending;
2957
723ad916
TF
2958 ret = wait_for_reset(adapter);
2959
2960 if (!ret &&
2961 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2962 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2963 netdev_info(netdev,
2964 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2965 ring->rx_pending, ring->tx_pending,
2966 adapter->req_rx_add_entries_per_subcrq,
2967 adapter->req_tx_entries_per_subcrq);
2968 return ret;
c26eba03
JA
2969}
2970
c2dbeb67
JA
2971static void ibmvnic_get_channels(struct net_device *netdev,
2972 struct ethtool_channels *channels)
2973{
2974 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2975
723ad916
TF
2976 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2977 channels->max_rx = adapter->max_rx_queues;
2978 channels->max_tx = adapter->max_tx_queues;
2979 } else {
2980 channels->max_rx = IBMVNIC_MAX_QUEUES;
2981 channels->max_tx = IBMVNIC_MAX_QUEUES;
2982 }
2983
c2dbeb67
JA
2984 channels->max_other = 0;
2985 channels->max_combined = 0;
2986 channels->rx_count = adapter->req_rx_queues;
2987 channels->tx_count = adapter->req_tx_queues;
2988 channels->other_count = 0;
2989 channels->combined_count = 0;
2990}
2991
c26eba03
JA
2992static int ibmvnic_set_channels(struct net_device *netdev,
2993 struct ethtool_channels *channels)
2994{
2995 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
723ad916 2996 int ret;
c26eba03 2997
723ad916 2998 ret = 0;
c26eba03
JA
2999 adapter->desired.rx_queues = channels->rx_count;
3000 adapter->desired.tx_queues = channels->tx_count;
3001
723ad916
TF
3002 ret = wait_for_reset(adapter);
3003
3004 if (!ret &&
3005 (adapter->req_rx_queues != channels->rx_count ||
3006 adapter->req_tx_queues != channels->tx_count))
3007 netdev_info(netdev,
3008 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3009 channels->rx_count, channels->tx_count,
3010 adapter->req_rx_queues, adapter->req_tx_queues);
3011 return ret;
c26eba03
JA
3012}
3013
032c5e82
TF
3014static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3015{
3d52b594 3016 struct ibmvnic_adapter *adapter = netdev_priv(dev);
032c5e82
TF
3017 int i;
3018
723ad916
TF
3019 switch (stringset) {
3020 case ETH_SS_STATS:
3021 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3022 i++, data += ETH_GSTRING_LEN)
3023 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
032c5e82 3024
723ad916
TF
3025 for (i = 0; i < adapter->req_tx_queues; i++) {
3026 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3027 data += ETH_GSTRING_LEN;
3d52b594 3028
723ad916
TF
3029 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3030 data += ETH_GSTRING_LEN;
3d52b594 3031
723ad916
TF
3032 snprintf(data, ETH_GSTRING_LEN,
3033 "tx%d_dropped_packets", i);
3034 data += ETH_GSTRING_LEN;
3035 }
3d52b594 3036
723ad916
TF
3037 for (i = 0; i < adapter->req_rx_queues; i++) {
3038 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3039 data += ETH_GSTRING_LEN;
3d52b594 3040
723ad916
TF
3041 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3042 data += ETH_GSTRING_LEN;
3d52b594 3043
723ad916
TF
3044 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3045 data += ETH_GSTRING_LEN;
3046 }
3047 break;
3d52b594 3048
723ad916
TF
3049 case ETH_SS_PRIV_FLAGS:
3050 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3051 strcpy(data + i * ETH_GSTRING_LEN,
3052 ibmvnic_priv_flags[i]);
3053 break;
3054 default:
3055 return;
3d52b594 3056 }
032c5e82
TF
3057}
3058
3059static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3060{
3d52b594
JA
3061 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3062
032c5e82
TF
3063 switch (sset) {
3064 case ETH_SS_STATS:
3d52b594
JA
3065 return ARRAY_SIZE(ibmvnic_stats) +
3066 adapter->req_tx_queues * NUM_TX_STATS +
3067 adapter->req_rx_queues * NUM_RX_STATS;
723ad916
TF
3068 case ETH_SS_PRIV_FLAGS:
3069 return ARRAY_SIZE(ibmvnic_priv_flags);
032c5e82
TF
3070 default:
3071 return -EOPNOTSUPP;
3072 }
3073}
3074
3075static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3076 struct ethtool_stats *stats, u64 *data)
3077{
3078 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3079 union ibmvnic_crq crq;
3d52b594 3080 int i, j;
9c4eaabd 3081 int rc;
032c5e82
TF
3082
3083 memset(&crq, 0, sizeof(crq));
3084 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3085 crq.request_statistics.cmd = REQUEST_STATISTICS;
3086 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3087 crq.request_statistics.len =
3088 cpu_to_be32(sizeof(struct ibmvnic_statistics));
032c5e82
TF
3089
3090 /* Wait for data to be written */
070eca95 3091 reinit_completion(&adapter->stats_done);
9c4eaabd
TF
3092 rc = ibmvnic_send_crq(adapter, &crq);
3093 if (rc)
3094 return;
476d96ca
TF
3095 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3096 if (rc)
3097 return;
032c5e82
TF
3098
3099 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
91dc5d25
LP
3100 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3101 (adapter, ibmvnic_stats[i].offset));
3d52b594
JA
3102
3103 for (j = 0; j < adapter->req_tx_queues; j++) {
3104 data[i] = adapter->tx_stats_buffers[j].packets;
3105 i++;
3106 data[i] = adapter->tx_stats_buffers[j].bytes;
3107 i++;
3108 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3109 i++;
3110 }
3111
3112 for (j = 0; j < adapter->req_rx_queues; j++) {
3113 data[i] = adapter->rx_stats_buffers[j].packets;
3114 i++;
3115 data[i] = adapter->rx_stats_buffers[j].bytes;
3116 i++;
3117 data[i] = adapter->rx_stats_buffers[j].interrupts;
3118 i++;
3119 }
032c5e82
TF
3120}
3121
723ad916
TF
3122static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3123{
3124 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3125
3126 return adapter->priv_flags;
3127}
3128
3129static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3130{
3131 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3132 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3133
3134 if (which_maxes)
3135 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3136 else
3137 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3138
3139 return 0;
3140}
91dc5d25 3141
032c5e82 3142static const struct ethtool_ops ibmvnic_ethtool_ops = {
032c5e82
TF
3143 .get_drvinfo = ibmvnic_get_drvinfo,
3144 .get_msglevel = ibmvnic_get_msglevel,
3145 .set_msglevel = ibmvnic_set_msglevel,
3146 .get_link = ibmvnic_get_link,
3147 .get_ringparam = ibmvnic_get_ringparam,
c26eba03 3148 .set_ringparam = ibmvnic_set_ringparam,
c2dbeb67 3149 .get_channels = ibmvnic_get_channels,
c26eba03 3150 .set_channels = ibmvnic_set_channels,
032c5e82
TF
3151 .get_strings = ibmvnic_get_strings,
3152 .get_sset_count = ibmvnic_get_sset_count,
3153 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
8a43379f 3154 .get_link_ksettings = ibmvnic_get_link_ksettings,
723ad916
TF
3155 .get_priv_flags = ibmvnic_get_priv_flags,
3156 .set_priv_flags = ibmvnic_set_priv_flags,
032c5e82
TF
3157};
3158
3159/* Routines for managing CRQs/sCRQs */
3160
57a49436
NF
3161static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3162 struct ibmvnic_sub_crq_queue *scrq)
3163{
3164 int rc;
3165
9281cf2d 3166 if (!scrq) {
862aecbd 3167 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
9281cf2d
DM
3168 return -EINVAL;
3169 }
3170
57a49436
NF
3171 if (scrq->irq) {
3172 free_irq(scrq->irq, scrq);
3173 irq_dispose_mapping(scrq->irq);
3174 scrq->irq = 0;
3175 }
3176
9281cf2d
DM
3177 if (scrq->msgs) {
3178 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3179 atomic_set(&scrq->used, 0);
3180 scrq->cur = 0;
55fd59b0 3181 scrq->ind_buf.index = 0;
9281cf2d
DM
3182 } else {
3183 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3184 return -EINVAL;
3185 }
57a49436
NF
3186
3187 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3188 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3189 return rc;
3190}
3191
3192static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3193{
3194 int i, rc;
3195
a0faaa27
LP
3196 if (!adapter->tx_scrq || !adapter->rx_scrq)
3197 return -EINVAL;
3198
57a49436 3199 for (i = 0; i < adapter->req_tx_queues; i++) {
d1cf33d9 3200 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
57a49436
NF
3201 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3202 if (rc)
3203 return rc;
3204 }
3205
3206 for (i = 0; i < adapter->req_rx_queues; i++) {
d1cf33d9 3207 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
57a49436
NF
3208 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3209 if (rc)
3210 return rc;
3211 }
3212
57a49436
NF
3213 return rc;
3214}
3215
032c5e82 3216static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
d7c0ef36
NF
3217 struct ibmvnic_sub_crq_queue *scrq,
3218 bool do_h_free)
032c5e82
TF
3219{
3220 struct device *dev = &adapter->vdev->dev;
3221 long rc;
3222
3223 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3224
d7c0ef36
NF
3225 if (do_h_free) {
3226 /* Close the sub-crqs */
3227 do {
3228 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3229 adapter->vdev->unit_address,
3230 scrq->crq_num);
3231 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
032c5e82 3232
d7c0ef36
NF
3233 if (rc) {
3234 netdev_err(adapter->netdev,
3235 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3236 scrq->crq_num, rc);
3237 }
ffa73855
TF
3238 }
3239
f019fb63
TF
3240 dma_free_coherent(dev,
3241 IBMVNIC_IND_ARR_SZ,
3242 scrq->ind_buf.indir_arr,
3243 scrq->ind_buf.indir_dma);
3244
032c5e82
TF
3245 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3246 DMA_BIDIRECTIONAL);
3247 free_pages((unsigned long)scrq->msgs, 2);
3248 kfree(scrq);
3249}
3250
3251static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3252 *adapter)
3253{
3254 struct device *dev = &adapter->vdev->dev;
3255 struct ibmvnic_sub_crq_queue *scrq;
3256 int rc;
3257
1bb3c739 3258 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
032c5e82
TF
3259 if (!scrq)
3260 return NULL;
3261
7f7adc50 3262 scrq->msgs =
1bb3c739 3263 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
032c5e82
TF
3264 if (!scrq->msgs) {
3265 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3266 goto zero_page_failed;
3267 }
3268
3269 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3270 DMA_BIDIRECTIONAL);
3271 if (dma_mapping_error(dev, scrq->msg_token)) {
3272 dev_warn(dev, "Couldn't map crq queue messages page\n");
3273 goto map_failed;
3274 }
3275
3276 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3277 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3278
3279 if (rc == H_RESOURCE)
3280 rc = ibmvnic_reset_crq(adapter);
3281
3282 if (rc == H_CLOSED) {
3283 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3284 } else if (rc) {
3285 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3286 goto reg_failed;
3287 }
3288
032c5e82
TF
3289 scrq->adapter = adapter;
3290 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
f019fb63
TF
3291 scrq->ind_buf.index = 0;
3292
3293 scrq->ind_buf.indir_arr =
3294 dma_alloc_coherent(dev,
3295 IBMVNIC_IND_ARR_SZ,
3296 &scrq->ind_buf.indir_dma,
3297 GFP_KERNEL);
3298
3299 if (!scrq->ind_buf.indir_arr)
3300 goto indir_failed;
3301
032c5e82
TF
3302 spin_lock_init(&scrq->lock);
3303
3304 netdev_dbg(adapter->netdev,
3305 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3306 scrq->crq_num, scrq->hw_irq, scrq->irq);
3307
3308 return scrq;
3309
f019fb63
TF
3310indir_failed:
3311 do {
3312 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3313 adapter->vdev->unit_address,
3314 scrq->crq_num);
3315 } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
032c5e82
TF
3316reg_failed:
3317 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3318 DMA_BIDIRECTIONAL);
3319map_failed:
3320 free_pages((unsigned long)scrq->msgs, 2);
3321zero_page_failed:
3322 kfree(scrq);
3323
3324 return NULL;
3325}
3326
d7c0ef36 3327static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
032c5e82
TF
3328{
3329 int i;
3330
3331 if (adapter->tx_scrq) {
82e3be32 3332 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
b510888f
NF
3333 if (!adapter->tx_scrq[i])
3334 continue;
3335
d1cf33d9
NF
3336 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3337 i);
65d6470d 3338 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
b510888f 3339 if (adapter->tx_scrq[i]->irq) {
032c5e82
TF
3340 free_irq(adapter->tx_scrq[i]->irq,
3341 adapter->tx_scrq[i]);
88eb98a0 3342 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
b510888f 3343 adapter->tx_scrq[i]->irq = 0;
032c5e82 3344 }
b510888f 3345
d7c0ef36
NF
3346 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3347 do_h_free);
b510888f
NF
3348 }
3349
9501df3c 3350 kfree(adapter->tx_scrq);
032c5e82 3351 adapter->tx_scrq = NULL;
82e3be32 3352 adapter->num_active_tx_scrqs = 0;
032c5e82
TF
3353 }
3354
3355 if (adapter->rx_scrq) {
82e3be32 3356 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
b510888f
NF
3357 if (!adapter->rx_scrq[i])
3358 continue;
3359
d1cf33d9
NF
3360 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3361 i);
b510888f 3362 if (adapter->rx_scrq[i]->irq) {
032c5e82
TF
3363 free_irq(adapter->rx_scrq[i]->irq,
3364 adapter->rx_scrq[i]);
88eb98a0 3365 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
b510888f 3366 adapter->rx_scrq[i]->irq = 0;
032c5e82 3367 }
032c5e82 3368
d7c0ef36
NF
3369 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3370 do_h_free);
b510888f 3371 }
ea22d51a 3372
b510888f 3373 kfree(adapter->rx_scrq);
ea22d51a 3374 adapter->rx_scrq = NULL;
82e3be32 3375 adapter->num_active_rx_scrqs = 0;
ea22d51a 3376 }
ea22d51a
TF
3377}
3378
032c5e82
TF
3379static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3380 struct ibmvnic_sub_crq_queue *scrq)
3381{
3382 struct device *dev = &adapter->vdev->dev;
3383 unsigned long rc;
3384
3385 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3386 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3387 if (rc)
3388 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3389 scrq->hw_irq, rc);
3390 return rc;
3391}
3392
3393static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3394 struct ibmvnic_sub_crq_queue *scrq)
3395{
3396 struct device *dev = &adapter->vdev->dev;
3397 unsigned long rc;
3398
3399 if (scrq->hw_irq > 0x100000000ULL) {
3400 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3401 return 1;
3402 }
3403
7ed5b31f 3404 if (test_bit(0, &adapter->resetting) &&
73f9d364 3405 adapter->reset_reason == VNIC_RESET_MOBILITY) {
284f87d2 3406 u64 val = (0xff000000) | scrq->hw_irq;
73f9d364 3407
284f87d2 3408 rc = plpar_hcall_norets(H_EOI, val);
2df5c60e
JK
3409 /* H_EOI would fail with rc = H_FUNCTION when running
3410 * in XIVE mode which is expected, but not an error.
3411 */
154b3b2a 3412 if (rc && (rc != H_FUNCTION))
284f87d2
JK
3413 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3414 val, rc);
73f9d364 3415 }
f23e0643 3416
032c5e82
TF
3417 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3418 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3419 if (rc)
3420 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3421 scrq->hw_irq, rc);
3422 return rc;
3423}
3424
3425static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3426 struct ibmvnic_sub_crq_queue *scrq)
3427{
3428 struct device *dev = &adapter->vdev->dev;
06b3e357 3429 struct ibmvnic_tx_pool *tx_pool;
032c5e82 3430 struct ibmvnic_tx_buff *txbuff;
0d973388 3431 struct netdev_queue *txq;
032c5e82
TF
3432 union sub_crq *next;
3433 int index;
c62aa373 3434 int i;
032c5e82
TF
3435
3436restart_loop:
3437 while (pending_scrq(adapter, scrq)) {
3438 unsigned int pool = scrq->pool_index;
ffc385b9 3439 int num_entries = 0;
0d973388
TF
3440 int total_bytes = 0;
3441 int num_packets = 0;
032c5e82
TF
3442
3443 next = ibmvnic_next_scrq(adapter, scrq);
3444 for (i = 0; i < next->tx_comp.num_comps; i++) {
032c5e82 3445 index = be32_to_cpu(next->tx_comp.correlators[i]);
06b3e357
TF
3446 if (index & IBMVNIC_TSO_POOL_MASK) {
3447 tx_pool = &adapter->tso_pool[pool];
3448 index &= ~IBMVNIC_TSO_POOL_MASK;
3449 } else {
3450 tx_pool = &adapter->tx_pool[pool];
3451 }
3452
3453 txbuff = &tx_pool->tx_buff[index];
0d973388
TF
3454 num_packets++;
3455 num_entries += txbuff->num_entries;
3456 if (txbuff->skb) {
3457 total_bytes += txbuff->skb->len;
ca09bf7b
LP
3458 if (next->tx_comp.rcs[i]) {
3459 dev_err(dev, "tx error %x\n",
3460 next->tx_comp.rcs[i]);
3461 dev_kfree_skb_irq(txbuff->skb);
3462 } else {
3463 dev_consume_skb_irq(txbuff->skb);
3464 }
7c3e7de3 3465 txbuff->skb = NULL;
0d973388
TF
3466 } else {
3467 netdev_warn(adapter->netdev,
3468 "TX completion received with NULL socket buffer\n");
142c0ac4 3469 }
06b3e357
TF
3470 tx_pool->free_map[tx_pool->producer_index] = index;
3471 tx_pool->producer_index =
3472 (tx_pool->producer_index + 1) %
3473 tx_pool->num_buffers;
032c5e82
TF
3474 }
3475 /* remove tx_comp scrq*/
3476 next->tx_comp.first = 0;
7c3e7de3 3477
0d973388
TF
3478 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3479 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3480
ffc385b9 3481 if (atomic_sub_return(num_entries, &scrq->used) <=
7c3e7de3
NF
3482 (adapter->req_tx_entries_per_subcrq / 2) &&
3483 __netif_subqueue_stopped(adapter->netdev,
3484 scrq->pool_index)) {
3485 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
0aecb13c
TF
3486 netdev_dbg(adapter->netdev, "Started queue %d\n",
3487 scrq->pool_index);
7c3e7de3 3488 }
032c5e82
TF
3489 }
3490
3491 enable_scrq_irq(adapter, scrq);
3492
3493 if (pending_scrq(adapter, scrq)) {
3494 disable_scrq_irq(adapter, scrq);
3495 goto restart_loop;
3496 }
3497
3498 return 0;
3499}
3500
3501static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3502{
3503 struct ibmvnic_sub_crq_queue *scrq = instance;
3504 struct ibmvnic_adapter *adapter = scrq->adapter;
3505
3506 disable_scrq_irq(adapter, scrq);
3507 ibmvnic_complete_tx(adapter, scrq);
3508
3509 return IRQ_HANDLED;
3510}
3511
3512static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3513{
3514 struct ibmvnic_sub_crq_queue *scrq = instance;
3515 struct ibmvnic_adapter *adapter = scrq->adapter;
3516
09fb35ea
NF
3517 /* When booting a kdump kernel we can hit pending interrupts
3518 * prior to completing driver initialization.
3519 */
3520 if (unlikely(adapter->state != VNIC_OPEN))
3521 return IRQ_NONE;
3522
3d52b594
JA
3523 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3524
032c5e82
TF
3525 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3526 disable_scrq_irq(adapter, scrq);
3527 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3528 }
3529
3530 return IRQ_HANDLED;
3531}
3532
ea22d51a
TF
3533static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3534{
3535 struct device *dev = &adapter->vdev->dev;
3536 struct ibmvnic_sub_crq_queue *scrq;
3537 int i = 0, j = 0;
3538 int rc = 0;
3539
3540 for (i = 0; i < adapter->req_tx_queues; i++) {
d1cf33d9
NF
3541 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3542 i);
ea22d51a
TF
3543 scrq = adapter->tx_scrq[i];
3544 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3545
99c1790e 3546 if (!scrq->irq) {
ea22d51a
TF
3547 rc = -EINVAL;
3548 dev_err(dev, "Error mapping irq\n");
3549 goto req_tx_irq_failed;
3550 }
3551
e56e2515
MFV
3552 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3553 adapter->vdev->unit_address, i);
ea22d51a 3554 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
e56e2515 3555 0, scrq->name, scrq);
ea22d51a
TF
3556
3557 if (rc) {
3558 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3559 scrq->irq, rc);
3560 irq_dispose_mapping(scrq->irq);
af9090c2 3561 goto req_tx_irq_failed;
ea22d51a
TF
3562 }
3563 }
3564
3565 for (i = 0; i < adapter->req_rx_queues; i++) {
d1cf33d9
NF
3566 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3567 i);
ea22d51a
TF
3568 scrq = adapter->rx_scrq[i];
3569 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
99c1790e 3570 if (!scrq->irq) {
ea22d51a
TF
3571 rc = -EINVAL;
3572 dev_err(dev, "Error mapping irq\n");
3573 goto req_rx_irq_failed;
3574 }
e56e2515
MFV
3575 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3576 adapter->vdev->unit_address, i);
ea22d51a 3577 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
e56e2515 3578 0, scrq->name, scrq);
ea22d51a
TF
3579 if (rc) {
3580 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3581 scrq->irq, rc);
3582 irq_dispose_mapping(scrq->irq);
3583 goto req_rx_irq_failed;
3584 }
3585 }
3586 return rc;
3587
3588req_rx_irq_failed:
8bf371e6 3589 for (j = 0; j < i; j++) {
ea22d51a
TF
3590 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3591 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
8bf371e6 3592 }
ea22d51a
TF
3593 i = adapter->req_tx_queues;
3594req_tx_irq_failed:
8bf371e6 3595 for (j = 0; j < i; j++) {
ea22d51a 3596 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
27a2145d 3597 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
8bf371e6 3598 }
d7c0ef36 3599 release_sub_crqs(adapter, 1);
ea22d51a
TF
3600 return rc;
3601}
3602
d346b9bc 3603static int init_sub_crqs(struct ibmvnic_adapter *adapter)
032c5e82
TF
3604{
3605 struct device *dev = &adapter->vdev->dev;
3606 struct ibmvnic_sub_crq_queue **allqueues;
3607 int registered_queues = 0;
032c5e82
TF
3608 int total_queues;
3609 int more = 0;
ea22d51a 3610 int i;
032c5e82 3611
032c5e82
TF
3612 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3613
1bb3c739 3614 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
032c5e82 3615 if (!allqueues)
d346b9bc 3616 return -1;
032c5e82
TF
3617
3618 for (i = 0; i < total_queues; i++) {
3619 allqueues[i] = init_sub_crq_queue(adapter);
3620 if (!allqueues[i]) {
3621 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3622 break;
3623 }
3624 registered_queues++;
3625 }
3626
3627 /* Make sure we were able to register the minimum number of queues */
3628 if (registered_queues <
3629 adapter->min_tx_queues + adapter->min_rx_queues) {
3630 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3631 goto tx_failed;
3632 }
3633
3634 /* Distribute the failed allocated queues*/
3635 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3636 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3637 switch (i % 3) {
3638 case 0:
3639 if (adapter->req_rx_queues > adapter->min_rx_queues)
3640 adapter->req_rx_queues--;
3641 else
3642 more++;
3643 break;
3644 case 1:
3645 if (adapter->req_tx_queues > adapter->min_tx_queues)
3646 adapter->req_tx_queues--;
3647 else
3648 more++;
3649 break;
3650 }
3651 }
3652
3653 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1bb3c739 3654 sizeof(*adapter->tx_scrq), GFP_KERNEL);
032c5e82
TF
3655 if (!adapter->tx_scrq)
3656 goto tx_failed;
3657
3658 for (i = 0; i < adapter->req_tx_queues; i++) {
3659 adapter->tx_scrq[i] = allqueues[i];
3660 adapter->tx_scrq[i]->pool_index = i;
82e3be32 3661 adapter->num_active_tx_scrqs++;
032c5e82
TF
3662 }
3663
3664 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1bb3c739 3665 sizeof(*adapter->rx_scrq), GFP_KERNEL);
032c5e82
TF
3666 if (!adapter->rx_scrq)
3667 goto rx_failed;
3668
3669 for (i = 0; i < adapter->req_rx_queues; i++) {
3670 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3671 adapter->rx_scrq[i]->scrq_num = i;
82e3be32 3672 adapter->num_active_rx_scrqs++;
032c5e82
TF
3673 }
3674
d346b9bc
NF
3675 kfree(allqueues);
3676 return 0;
3677
3678rx_failed:
3679 kfree(adapter->tx_scrq);
3680 adapter->tx_scrq = NULL;
3681tx_failed:
3682 for (i = 0; i < registered_queues; i++)
d7c0ef36 3683 release_sub_crq_queue(adapter, allqueues[i], 1);
d346b9bc
NF
3684 kfree(allqueues);
3685 return -1;
3686}
3687
09081b9d 3688static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
d346b9bc
NF
3689{
3690 struct device *dev = &adapter->vdev->dev;
3691 union ibmvnic_crq crq;
c26eba03 3692 int max_entries;
f4f81217
SB
3693 int cap_reqs;
3694
3695 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
3696 * the PROMISC flag). Initialize this count upfront. When the tasklet
3697 * receives a response to all of these, it will send the next protocol
3698 * message (QUERY_IP_OFFLOAD).
3699 */
3700 if (!(adapter->netdev->flags & IFF_PROMISC) ||
3701 adapter->promisc_supported)
3702 cap_reqs = 7;
3703 else
3704 cap_reqs = 6;
d346b9bc
NF
3705
3706 if (!retry) {
3707 /* Sub-CRQ entries are 32 byte long */
3708 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3709
f4f81217
SB
3710 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3711
d346b9bc
NF
3712 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3713 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3714 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3715 return;
3716 }
3717
c26eba03
JA
3718 if (adapter->desired.mtu)
3719 adapter->req_mtu = adapter->desired.mtu;
3720 else
3721 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3722
3723 if (!adapter->desired.tx_entries)
3724 adapter->desired.tx_entries =
3725 adapter->max_tx_entries_per_subcrq;
3726 if (!adapter->desired.rx_entries)
3727 adapter->desired.rx_entries =
3728 adapter->max_rx_add_entries_per_subcrq;
3729
3730 max_entries = IBMVNIC_MAX_LTB_SIZE /
3731 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3732
3733 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3734 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3735 adapter->desired.tx_entries = max_entries;
3736 }
d346b9bc 3737
c26eba03
JA
3738 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3739 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3740 adapter->desired.rx_entries = max_entries;
3741 }
3742
3743 if (adapter->desired.tx_entries)
3744 adapter->req_tx_entries_per_subcrq =
3745 adapter->desired.tx_entries;
3746 else
3747 adapter->req_tx_entries_per_subcrq =
3748 adapter->max_tx_entries_per_subcrq;
3749
3750 if (adapter->desired.rx_entries)
3751 adapter->req_rx_add_entries_per_subcrq =
3752 adapter->desired.rx_entries;
3753 else
3754 adapter->req_rx_add_entries_per_subcrq =
3755 adapter->max_rx_add_entries_per_subcrq;
3756
3757 if (adapter->desired.tx_queues)
3758 adapter->req_tx_queues =
3759 adapter->desired.tx_queues;
3760 else
3761 adapter->req_tx_queues =
3762 adapter->opt_tx_comp_sub_queues;
3763
3764 if (adapter->desired.rx_queues)
3765 adapter->req_rx_queues =
3766 adapter->desired.rx_queues;
3767 else
3768 adapter->req_rx_queues =
3769 adapter->opt_rx_comp_queues;
3770
3771 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
f4f81217
SB
3772 } else {
3773 atomic_add(cap_reqs, &adapter->running_cap_crqs);
d346b9bc 3774 }
032c5e82
TF
3775 memset(&crq, 0, sizeof(crq));
3776 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3777 crq.request_capability.cmd = REQUEST_CAPABILITY;
3778
3779 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
de89e854 3780 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
f4f81217 3781 cap_reqs--;
032c5e82
TF
3782 ibmvnic_send_crq(adapter, &crq);
3783
3784 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
de89e854 3785 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
f4f81217 3786 cap_reqs--;
032c5e82
TF
3787 ibmvnic_send_crq(adapter, &crq);
3788
3789 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
de89e854 3790 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
f4f81217 3791 cap_reqs--;
032c5e82
TF
3792 ibmvnic_send_crq(adapter, &crq);
3793
3794 crq.request_capability.capability =
3795 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3796 crq.request_capability.number =
de89e854 3797 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
f4f81217 3798 cap_reqs--;
032c5e82
TF
3799 ibmvnic_send_crq(adapter, &crq);
3800
3801 crq.request_capability.capability =
3802 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3803 crq.request_capability.number =
de89e854 3804 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
f4f81217 3805 cap_reqs--;
032c5e82
TF
3806 ibmvnic_send_crq(adapter, &crq);
3807
3808 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
de89e854 3809 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
f4f81217 3810 cap_reqs--;
032c5e82
TF
3811 ibmvnic_send_crq(adapter, &crq);
3812
3813 if (adapter->netdev->flags & IFF_PROMISC) {
3814 if (adapter->promisc_supported) {
3815 crq.request_capability.capability =
3816 cpu_to_be16(PROMISC_REQUESTED);
de89e854 3817 crq.request_capability.number = cpu_to_be64(1);
f4f81217 3818 cap_reqs--;
032c5e82
TF
3819 ibmvnic_send_crq(adapter, &crq);
3820 }
3821 } else {
3822 crq.request_capability.capability =
3823 cpu_to_be16(PROMISC_REQUESTED);
de89e854 3824 crq.request_capability.number = cpu_to_be64(0);
f4f81217 3825 cap_reqs--;
032c5e82
TF
3826 ibmvnic_send_crq(adapter, &crq);
3827 }
f4f81217
SB
3828
3829 /* Keep at end to catch any discrepancy between expected and actual
3830 * CRQs sent.
3831 */
3832 WARN_ON(cap_reqs != 0);
032c5e82
TF
3833}
3834
3835static int pending_scrq(struct ibmvnic_adapter *adapter,
3836 struct ibmvnic_sub_crq_queue *scrq)
3837{
3838 union sub_crq *entry = &scrq->msgs[scrq->cur];
665ab1eb 3839 int rc;
032c5e82 3840
665ab1eb
LP
3841 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3842
3843 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3844 * contents of the SCRQ descriptor
3845 */
3846 dma_rmb();
3847
3848 return rc;
032c5e82
TF
3849}
3850
3851static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3852 struct ibmvnic_sub_crq_queue *scrq)
3853{
3854 union sub_crq *entry;
3855 unsigned long flags;
3856
3857 spin_lock_irqsave(&scrq->lock, flags);
3858 entry = &scrq->msgs[scrq->cur];
3859 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3860 if (++scrq->cur == scrq->size)
3861 scrq->cur = 0;
3862 } else {
3863 entry = NULL;
3864 }
3865 spin_unlock_irqrestore(&scrq->lock, flags);
3866
665ab1eb
LP
3867 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3868 * contents of the SCRQ descriptor
b71ec952
TF
3869 */
3870 dma_rmb();
3871
032c5e82
TF
3872 return entry;
3873}
3874
3875static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3876{
3877 struct ibmvnic_crq_queue *queue = &adapter->crq;
3878 union ibmvnic_crq *crq;
3879
3880 crq = &queue->msgs[queue->cur];
3881 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3882 if (++queue->cur == queue->size)
3883 queue->cur = 0;
3884 } else {
3885 crq = NULL;
3886 }
3887
3888 return crq;
3889}
3890
2d14d379
TF
3891static void print_subcrq_error(struct device *dev, int rc, const char *func)
3892{
3893 switch (rc) {
3894 case H_PARAMETER:
3895 dev_warn_ratelimited(dev,
3896 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3897 func, rc);
3898 break;
3899 case H_CLOSED:
3900 dev_warn_ratelimited(dev,
3901 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3902 func, rc);
3903 break;
3904 default:
3905 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3906 break;
3907 }
3908}
3909
ad7775dc
TF
3910static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3911 u64 remote_handle, u64 ioba, u64 num_entries)
3912{
3913 unsigned int ua = adapter->vdev->unit_address;
3914 struct device *dev = &adapter->vdev->dev;
3915 int rc;
3916
3917 /* Make sure the hypervisor sees the complete request */
1a42156f 3918 dma_wmb();
ad7775dc
TF
3919 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3920 cpu_to_be64(remote_handle),
3921 ioba, num_entries);
3922
2d14d379
TF
3923 if (rc)
3924 print_subcrq_error(dev, rc, __func__);
ad7775dc
TF
3925
3926 return rc;
3927}
3928
032c5e82
TF
3929static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3930 union ibmvnic_crq *crq)
3931{
3932 unsigned int ua = adapter->vdev->unit_address;
3933 struct device *dev = &adapter->vdev->dev;
3934 u64 *u64_crq = (u64 *)crq;
3935 int rc;
3936
3937 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
429aa364
LP
3938 (unsigned long)cpu_to_be64(u64_crq[0]),
3939 (unsigned long)cpu_to_be64(u64_crq[1]));
032c5e82 3940
5153698e
TF
3941 if (!adapter->crq.active &&
3942 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3943 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3944 return -EINVAL;
3945 }
3946
032c5e82 3947 /* Make sure the hypervisor sees the complete request */
1a42156f 3948 dma_wmb();
032c5e82
TF
3949
3950 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3951 cpu_to_be64(u64_crq[0]),
3952 cpu_to_be64(u64_crq[1]));
3953
3954 if (rc) {
ec95dffa 3955 if (rc == H_CLOSED) {
032c5e82 3956 dev_warn(dev, "CRQ Queue closed\n");
fa68bfab 3957 /* do not reset, report the fail, wait for passive init from server */
ec95dffa
NF
3958 }
3959
032c5e82
TF
3960 dev_warn(dev, "Send error (rc=%d)\n", rc);
3961 }
3962
3963 return rc;
3964}
3965
3966static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3967{
36a782fd 3968 struct device *dev = &adapter->vdev->dev;
032c5e82 3969 union ibmvnic_crq crq;
36a782fd
TF
3970 int retries = 100;
3971 int rc;
032c5e82
TF
3972
3973 memset(&crq, 0, sizeof(crq));
3974 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3975 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3976 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3977
36a782fd
TF
3978 do {
3979 rc = ibmvnic_send_crq(adapter, &crq);
3980 if (rc != H_CLOSED)
3981 break;
3982 retries--;
3983 msleep(50);
3984
3985 } while (retries > 0);
3986
3987 if (rc) {
3988 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3989 return rc;
3990 }
3991
3992 return 0;
032c5e82
TF
3993}
3994
37798d02
NF
3995struct vnic_login_client_data {
3996 u8 type;
3997 __be16 len;
08ea556e 3998 char name[];
37798d02
NF
3999} __packed;
4000
4001static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4002{
4003 int len;
4004
4005 /* Calculate the amount of buffer space needed for the
4006 * vnic client data in the login buffer. There are four entries,
4007 * OS name, LPAR name, device name, and a null last entry.
4008 */
4009 len = 4 * sizeof(struct vnic_login_client_data);
4010 len += 6; /* "Linux" plus NULL */
4011 len += strlen(utsname()->nodename) + 1;
4012 len += strlen(adapter->netdev->name) + 1;
4013
4014 return len;
4015}
4016
4017static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4018 struct vnic_login_client_data *vlcd)
4019{
4020 const char *os_name = "Linux";
4021 int len;
4022
4023 /* Type 1 - LPAR OS */
4024 vlcd->type = 1;
4025 len = strlen(os_name) + 1;
4026 vlcd->len = cpu_to_be16(len);
ef2c3dda 4027 strscpy(vlcd->name, os_name, len);
08ea556e 4028 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
37798d02
NF
4029
4030 /* Type 2 - LPAR name */
4031 vlcd->type = 2;
4032 len = strlen(utsname()->nodename) + 1;
4033 vlcd->len = cpu_to_be16(len);
ef2c3dda 4034 strscpy(vlcd->name, utsname()->nodename, len);
08ea556e 4035 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
37798d02
NF
4036
4037 /* Type 3 - device name */
4038 vlcd->type = 3;
4039 len = strlen(adapter->netdev->name) + 1;
4040 vlcd->len = cpu_to_be16(len);
ef2c3dda 4041 strscpy(vlcd->name, adapter->netdev->name, len);
37798d02
NF
4042}
4043
20a8ab74 4044static int send_login(struct ibmvnic_adapter *adapter)
032c5e82
TF
4045{
4046 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4047 struct ibmvnic_login_buffer *login_buffer;
032c5e82 4048 struct device *dev = &adapter->vdev->dev;
c98d9cc4 4049 struct vnic_login_client_data *vlcd;
032c5e82
TF
4050 dma_addr_t rsp_buffer_token;
4051 dma_addr_t buffer_token;
4052 size_t rsp_buffer_size;
4053 union ibmvnic_crq crq;
c98d9cc4 4054 int client_data_len;
032c5e82
TF
4055 size_t buffer_size;
4056 __be64 *tx_list_p;
4057 __be64 *rx_list_p;
c98d9cc4 4058 int rc;
032c5e82
TF
4059 int i;
4060
20a8ab74
TF
4061 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4062 netdev_err(adapter->netdev,
4063 "RX or TX queues are not allocated, device login failed\n");
4064 return -1;
4065 }
4066
a0c8be56 4067 release_login_buffer(adapter);
34f0f4e3 4068 release_login_rsp_buffer(adapter);
a0c8be56 4069
37798d02
NF
4070 client_data_len = vnic_client_data_len(adapter);
4071
032c5e82
TF
4072 buffer_size =
4073 sizeof(struct ibmvnic_login_buffer) +
37798d02
NF
4074 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4075 client_data_len;
032c5e82 4076
37798d02 4077 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
032c5e82
TF
4078 if (!login_buffer)
4079 goto buf_alloc_failed;
4080
4081 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4082 DMA_TO_DEVICE);
4083 if (dma_mapping_error(dev, buffer_token)) {
4084 dev_err(dev, "Couldn't map login buffer\n");
4085 goto buf_map_failed;
4086 }
4087
498cd8e4
JA
4088 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4089 sizeof(u64) * adapter->req_tx_queues +
4090 sizeof(u64) * adapter->req_rx_queues +
4091 sizeof(u64) * adapter->req_rx_queues +
4092 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
032c5e82
TF
4093
4094 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4095 if (!login_rsp_buffer)
4096 goto buf_rsp_alloc_failed;
4097
4098 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4099 rsp_buffer_size, DMA_FROM_DEVICE);
4100 if (dma_mapping_error(dev, rsp_buffer_token)) {
4101 dev_err(dev, "Couldn't map login rsp buffer\n");
4102 goto buf_rsp_map_failed;
4103 }
661a2622 4104
032c5e82
TF
4105 adapter->login_buf = login_buffer;
4106 adapter->login_buf_token = buffer_token;
4107 adapter->login_buf_sz = buffer_size;
4108 adapter->login_rsp_buf = login_rsp_buffer;
4109 adapter->login_rsp_buf_token = rsp_buffer_token;
4110 adapter->login_rsp_buf_sz = rsp_buffer_size;
4111
4112 login_buffer->len = cpu_to_be32(buffer_size);
4113 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4114 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4115 login_buffer->off_txcomp_subcrqs =
4116 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4117 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4118 login_buffer->off_rxcomp_subcrqs =
4119 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4120 sizeof(u64) * adapter->req_tx_queues);
4121 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4122 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4123
4124 tx_list_p = (__be64 *)((char *)login_buffer +
4125 sizeof(struct ibmvnic_login_buffer));
4126 rx_list_p = (__be64 *)((char *)login_buffer +
4127 sizeof(struct ibmvnic_login_buffer) +
4128 sizeof(u64) * adapter->req_tx_queues);
4129
4130 for (i = 0; i < adapter->req_tx_queues; i++) {
4131 if (adapter->tx_scrq[i]) {
914789ac
LP
4132 tx_list_p[i] =
4133 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
032c5e82
TF
4134 }
4135 }
4136
4137 for (i = 0; i < adapter->req_rx_queues; i++) {
4138 if (adapter->rx_scrq[i]) {
914789ac
LP
4139 rx_list_p[i] =
4140 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
032c5e82
TF
4141 }
4142 }
4143
37798d02
NF
4144 /* Insert vNIC login client data */
4145 vlcd = (struct vnic_login_client_data *)
4146 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4147 login_buffer->client_data_offset =
4148 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4149 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4150
4151 vnic_add_client_data(adapter, vlcd);
4152
032c5e82
TF
4153 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4154 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4155 netdev_dbg(adapter->netdev, "%016lx\n",
429aa364 4156 ((unsigned long *)(adapter->login_buf))[i]);
032c5e82
TF
4157 }
4158
4159 memset(&crq, 0, sizeof(crq));
4160 crq.login.first = IBMVNIC_CRQ_CMD;
4161 crq.login.cmd = LOGIN;
4162 crq.login.ioba = cpu_to_be32(buffer_token);
4163 crq.login.len = cpu_to_be32(buffer_size);
76cdc5c5
SB
4164
4165 adapter->login_pending = true;
c98d9cc4
DM
4166 rc = ibmvnic_send_crq(adapter, &crq);
4167 if (rc) {
4168 adapter->login_pending = false;
4169 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4170 goto buf_rsp_map_failed;
4171 }
032c5e82 4172
20a8ab74 4173 return 0;
032c5e82 4174
032c5e82
TF
4175buf_rsp_map_failed:
4176 kfree(login_rsp_buffer);
c98d9cc4 4177 adapter->login_rsp_buf = NULL;
032c5e82
TF
4178buf_rsp_alloc_failed:
4179 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4180buf_map_failed:
4181 kfree(login_buffer);
c98d9cc4 4182 adapter->login_buf = NULL;
032c5e82 4183buf_alloc_failed:
20a8ab74 4184 return -1;
032c5e82
TF
4185}
4186
9c4eaabd
TF
4187static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4188 u32 len, u8 map_id)
032c5e82
TF
4189{
4190 union ibmvnic_crq crq;
4191
4192 memset(&crq, 0, sizeof(crq));
4193 crq.request_map.first = IBMVNIC_CRQ_CMD;
4194 crq.request_map.cmd = REQUEST_MAP;
4195 crq.request_map.map_id = map_id;
4196 crq.request_map.ioba = cpu_to_be32(addr);
4197 crq.request_map.len = cpu_to_be32(len);
9c4eaabd 4198 return ibmvnic_send_crq(adapter, &crq);
032c5e82
TF
4199}
4200
9c4eaabd 4201static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
032c5e82
TF
4202{
4203 union ibmvnic_crq crq;
4204
4205 memset(&crq, 0, sizeof(crq));
4206 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4207 crq.request_unmap.cmd = REQUEST_UNMAP;
4208 crq.request_unmap.map_id = map_id;
9c4eaabd 4209 return ibmvnic_send_crq(adapter, &crq);
032c5e82
TF
4210}
4211
69980d02 4212static void send_query_map(struct ibmvnic_adapter *adapter)
032c5e82
TF
4213{
4214 union ibmvnic_crq crq;
4215
4216 memset(&crq, 0, sizeof(crq));
4217 crq.query_map.first = IBMVNIC_CRQ_CMD;
4218 crq.query_map.cmd = QUERY_MAP;
4219 ibmvnic_send_crq(adapter, &crq);
4220}
4221
4222/* Send a series of CRQs requesting various capabilities of the VNIC server */
491099ad 4223static void send_query_cap(struct ibmvnic_adapter *adapter)
032c5e82
TF
4224{
4225 union ibmvnic_crq crq;
f4f81217
SB
4226 int cap_reqs;
4227
4228 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
4229 * upfront. When the tasklet receives a response to all of these, it
4230 * can send out the next protocol messaage (REQUEST_CAPABILITY).
4231 */
4232 cap_reqs = 25;
4233
4234 atomic_set(&adapter->running_cap_crqs, cap_reqs);
032c5e82 4235
032c5e82
TF
4236 memset(&crq, 0, sizeof(crq));
4237 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4238 crq.query_capability.cmd = QUERY_CAPABILITY;
4239
4240 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
032c5e82 4241 ibmvnic_send_crq(adapter, &crq);
f4f81217 4242 cap_reqs--;
032c5e82
TF
4243
4244 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
032c5e82 4245 ibmvnic_send_crq(adapter, &crq);
f4f81217 4246 cap_reqs--;
032c5e82
TF
4247
4248 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
032c5e82 4249 ibmvnic_send_crq(adapter, &crq);
f4f81217 4250 cap_reqs--;
032c5e82
TF
4251
4252 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
032c5e82 4253 ibmvnic_send_crq(adapter, &crq);
f4f81217 4254 cap_reqs--;
032c5e82
TF
4255
4256 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
032c5e82 4257 ibmvnic_send_crq(adapter, &crq);
f4f81217 4258 cap_reqs--;
032c5e82
TF
4259
4260 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
032c5e82 4261 ibmvnic_send_crq(adapter, &crq);
f4f81217 4262 cap_reqs--;
032c5e82
TF
4263
4264 crq.query_capability.capability =
4265 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
032c5e82 4266 ibmvnic_send_crq(adapter, &crq);
f4f81217 4267 cap_reqs--;
032c5e82
TF
4268
4269 crq.query_capability.capability =
4270 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
032c5e82 4271 ibmvnic_send_crq(adapter, &crq);
f4f81217 4272 cap_reqs--;
032c5e82
TF
4273
4274 crq.query_capability.capability =
4275 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
032c5e82 4276 ibmvnic_send_crq(adapter, &crq);
f4f81217 4277 cap_reqs--;
032c5e82
TF
4278
4279 crq.query_capability.capability =
4280 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
032c5e82 4281 ibmvnic_send_crq(adapter, &crq);
f4f81217 4282 cap_reqs--;
032c5e82
TF
4283
4284 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
032c5e82 4285 ibmvnic_send_crq(adapter, &crq);
f4f81217 4286 cap_reqs--;
032c5e82
TF
4287
4288 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
032c5e82 4289 ibmvnic_send_crq(adapter, &crq);
f4f81217 4290 cap_reqs--;
032c5e82
TF
4291
4292 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
032c5e82 4293 ibmvnic_send_crq(adapter, &crq);
f4f81217 4294 cap_reqs--;
032c5e82
TF
4295
4296 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
032c5e82 4297 ibmvnic_send_crq(adapter, &crq);
f4f81217 4298 cap_reqs--;
032c5e82
TF
4299
4300 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
032c5e82 4301 ibmvnic_send_crq(adapter, &crq);
f4f81217 4302 cap_reqs--;
032c5e82
TF
4303
4304 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
032c5e82 4305 ibmvnic_send_crq(adapter, &crq);
f4f81217 4306 cap_reqs--;
032c5e82 4307
6052d5e2 4308 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
6052d5e2 4309 ibmvnic_send_crq(adapter, &crq);
f4f81217 4310 cap_reqs--;
6052d5e2 4311
032c5e82 4312 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
032c5e82 4313 ibmvnic_send_crq(adapter, &crq);
f4f81217 4314 cap_reqs--;
032c5e82
TF
4315
4316 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
032c5e82 4317 ibmvnic_send_crq(adapter, &crq);
f4f81217 4318 cap_reqs--;
032c5e82
TF
4319
4320 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
032c5e82 4321 ibmvnic_send_crq(adapter, &crq);
f4f81217 4322 cap_reqs--;
032c5e82
TF
4323
4324 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
032c5e82 4325 ibmvnic_send_crq(adapter, &crq);
f4f81217 4326 cap_reqs--;
032c5e82
TF
4327
4328 crq.query_capability.capability =
4329 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
032c5e82 4330 ibmvnic_send_crq(adapter, &crq);
f4f81217 4331 cap_reqs--;
032c5e82
TF
4332
4333 crq.query_capability.capability =
4334 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
032c5e82 4335 ibmvnic_send_crq(adapter, &crq);
f4f81217 4336 cap_reqs--;
032c5e82
TF
4337
4338 crq.query_capability.capability =
4339 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
032c5e82 4340 ibmvnic_send_crq(adapter, &crq);
f4f81217 4341 cap_reqs--;
032c5e82
TF
4342
4343 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
f4f81217 4344
032c5e82 4345 ibmvnic_send_crq(adapter, &crq);
f4f81217
SB
4346 cap_reqs--;
4347
4348 /* Keep at end to catch any discrepancy between expected and actual
4349 * CRQs sent.
4350 */
4351 WARN_ON(cap_reqs != 0);
032c5e82
TF
4352}
4353
16e811fe
LP
4354static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4355{
4356 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4357 struct device *dev = &adapter->vdev->dev;
4358 union ibmvnic_crq crq;
4359
4360 adapter->ip_offload_tok =
4361 dma_map_single(dev,
4362 &adapter->ip_offload_buf,
4363 buf_sz,
4364 DMA_FROM_DEVICE);
4365
4366 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4367 if (!firmware_has_feature(FW_FEATURE_CMO))
4368 dev_err(dev, "Couldn't map offload buffer\n");
4369 return;
4370 }
4371
4372 memset(&crq, 0, sizeof(crq));
4373 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4374 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4375 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4376 crq.query_ip_offload.ioba =
4377 cpu_to_be32(adapter->ip_offload_tok);
4378
4379 ibmvnic_send_crq(adapter, &crq);
4380}
4381
46899bde
LP
4382static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4383{
4384 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4385 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4386 struct device *dev = &adapter->vdev->dev;
4387 netdev_features_t old_hw_features = 0;
4388 union ibmvnic_crq crq;
4389
4390 adapter->ip_offload_ctrl_tok =
4391 dma_map_single(dev,
4392 ctrl_buf,
4393 sizeof(adapter->ip_offload_ctrl),
4394 DMA_TO_DEVICE);
4395
4396 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4397 dev_err(dev, "Couldn't map ip offload control buffer\n");
4398 return;
4399 }
4400
4401 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4402 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4403 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4404 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4405 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4406 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4407 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4408 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4409 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4410 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4411
4412 /* large_rx disabled for now, additional features needed */
4413 ctrl_buf->large_rx_ipv4 = 0;
4414 ctrl_buf->large_rx_ipv6 = 0;
4415
4416 if (adapter->state != VNIC_PROBING) {
4417 old_hw_features = adapter->netdev->hw_features;
4418 adapter->netdev->hw_features = 0;
4419 }
4420
4421 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4422
4423 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4424 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4425
4426 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4427 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4428
4429 if ((adapter->netdev->features &
4430 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4431 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4432
4433 if (buf->large_tx_ipv4)
4434 adapter->netdev->hw_features |= NETIF_F_TSO;
4435 if (buf->large_tx_ipv6)
4436 adapter->netdev->hw_features |= NETIF_F_TSO6;
4437
4438 if (adapter->state == VNIC_PROBING) {
4439 adapter->netdev->features |= adapter->netdev->hw_features;
4440 } else if (old_hw_features != adapter->netdev->hw_features) {
4441 netdev_features_t tmp = 0;
4442
4443 /* disable features no longer supported */
4444 adapter->netdev->features &= adapter->netdev->hw_features;
4445 /* turn on features now supported if previously enabled */
4446 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4447 adapter->netdev->hw_features;
4448 adapter->netdev->features |=
4449 tmp & adapter->netdev->wanted_features;
4450 }
4451
4452 memset(&crq, 0, sizeof(crq));
4453 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4454 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4455 crq.control_ip_offload.len =
4456 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4457 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4458 ibmvnic_send_crq(adapter, &crq);
4459}
4460
4e6759be
DANR
4461static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4462 struct ibmvnic_adapter *adapter)
4463{
4464 struct device *dev = &adapter->vdev->dev;
4465
4466 if (crq->get_vpd_size_rsp.rc.code) {
4467 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4468 crq->get_vpd_size_rsp.rc.code);
4469 complete(&adapter->fw_done);
4470 return;
4471 }
4472
4473 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4474 complete(&adapter->fw_done);
4475}
4476
4477static void handle_vpd_rsp(union ibmvnic_crq *crq,
4478 struct ibmvnic_adapter *adapter)
4479{
4480 struct device *dev = &adapter->vdev->dev;
21a2545b 4481 unsigned char *substr = NULL;
4e6759be
DANR
4482 u8 fw_level_len = 0;
4483
4484 memset(adapter->fw_version, 0, 32);
4485
4486 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4487 DMA_FROM_DEVICE);
4488
4489 if (crq->get_vpd_rsp.rc.code) {
4490 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4491 crq->get_vpd_rsp.rc.code);
4492 goto complete;
4493 }
4494
4495 /* get the position of the firmware version info
4496 * located after the ASCII 'RM' substring in the buffer
4497 */
4498 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4499 if (!substr) {
a107311d 4500 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4e6759be
DANR
4501 goto complete;
4502 }
4503
4504 /* get length of firmware level ASCII substring */
4505 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4506 fw_level_len = *(substr + 2);
4507 } else {
4508 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4509 goto complete;
4510 }
4511
4512 /* copy firmware version string from vpd into adapter */
4513 if ((substr + 3 + fw_level_len) <
4514 (adapter->vpd->buff + adapter->vpd->len)) {
21a2545b 4515 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4e6759be
DANR
4516 } else {
4517 dev_info(dev, "FW substr extrapolated VPD buff\n");
4518 }
4519
4520complete:
21a2545b 4521 if (adapter->fw_version[0] == '\0')
0b217d3d 4522 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
4e6759be
DANR
4523 complete(&adapter->fw_done);
4524}
4525
032c5e82
TF
4526static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4527{
4528 struct device *dev = &adapter->vdev->dev;
4529 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
032c5e82
TF
4530 int i;
4531
4532 dma_unmap_single(dev, adapter->ip_offload_tok,
4533 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4534
4535 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4536 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4537 netdev_dbg(adapter->netdev, "%016lx\n",
429aa364 4538 ((unsigned long *)(buf))[i]);
032c5e82
TF
4539
4540 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4541 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4542 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4543 buf->tcp_ipv4_chksum);
4544 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4545 buf->tcp_ipv6_chksum);
4546 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4547 buf->udp_ipv4_chksum);
4548 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4549 buf->udp_ipv6_chksum);
4550 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4551 buf->large_tx_ipv4);
4552 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4553 buf->large_tx_ipv6);
4554 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4555 buf->large_rx_ipv4);
4556 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4557 buf->large_rx_ipv6);
4558 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4559 buf->max_ipv4_header_size);
4560 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4561 buf->max_ipv6_header_size);
4562 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4563 buf->max_tcp_header_size);
4564 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4565 buf->max_udp_header_size);
4566 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4567 buf->max_large_tx_size);
4568 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4569 buf->max_large_rx_size);
4570 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4571 buf->ipv6_extension_header);
4572 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4573 buf->tcp_pseudosum_req);
4574 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4575 buf->num_ipv6_ext_headers);
4576 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4577 buf->off_ipv6_ext_headers);
4578
46899bde 4579 send_control_ip_offload(adapter);
032c5e82
TF
4580}
4581
c9008d33
TF
4582static const char *ibmvnic_fw_err_cause(u16 cause)
4583{
4584 switch (cause) {
4585 case ADAPTER_PROBLEM:
4586 return "adapter problem";
4587 case BUS_PROBLEM:
4588 return "bus problem";
4589 case FW_PROBLEM:
4590 return "firmware problem";
4591 case DD_PROBLEM:
4592 return "device driver problem";
4593 case EEH_RECOVERY:
4594 return "EEH recovery";
4595 case FW_UPDATED:
4596 return "firmware updated";
4597 case LOW_MEMORY:
4598 return "low Memory";
4599 default:
4600 return "unknown";
4601 }
4602}
4603
2f9de9ba
NF
4604static void handle_error_indication(union ibmvnic_crq *crq,
4605 struct ibmvnic_adapter *adapter)
4606{
4607 struct device *dev = &adapter->vdev->dev;
c9008d33
TF
4608 u16 cause;
4609
4610 cause = be16_to_cpu(crq->error_indication.error_cause);
2f9de9ba 4611
c9008d33
TF
4612 dev_warn_ratelimited(dev,
4613 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4614 crq->error_indication.flags
4615 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4616 ibmvnic_fw_err_cause(cause));
2f9de9ba 4617
ed651a10
NF
4618 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4619 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
8cb31cfc
JA
4620 else
4621 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
032c5e82
TF
4622}
4623
f813614f
TF
4624static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4625 struct ibmvnic_adapter *adapter)
032c5e82
TF
4626{
4627 struct net_device *netdev = adapter->netdev;
4628 struct device *dev = &adapter->vdev->dev;
4629 long rc;
4630
4631 rc = crq->change_mac_addr_rsp.rc.code;
4632 if (rc) {
4633 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
f813614f 4634 goto out;
032c5e82 4635 }
d9b0e599
LP
4636 /* crq->change_mac_addr.mac_addr is the requested one
4637 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4638 */
62740e97
TF
4639 ether_addr_copy(netdev->dev_addr,
4640 &crq->change_mac_addr_rsp.mac_addr[0]);
d9b0e599
LP
4641 ether_addr_copy(adapter->mac_addr,
4642 &crq->change_mac_addr_rsp.mac_addr[0]);
f813614f
TF
4643out:
4644 complete(&adapter->fw_done);
4645 return rc;
032c5e82
TF
4646}
4647
4648static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4649 struct ibmvnic_adapter *adapter)
4650{
4651 struct device *dev = &adapter->vdev->dev;
4652 u64 *req_value;
4653 char *name;
4654
901e040a 4655 atomic_dec(&adapter->running_cap_crqs);
f4f81217
SB
4656 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
4657 atomic_read(&adapter->running_cap_crqs));
032c5e82
TF
4658 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4659 case REQ_TX_QUEUES:
4660 req_value = &adapter->req_tx_queues;
4661 name = "tx";
4662 break;
4663 case REQ_RX_QUEUES:
4664 req_value = &adapter->req_rx_queues;
4665 name = "rx";
4666 break;
4667 case REQ_RX_ADD_QUEUES:
4668 req_value = &adapter->req_rx_add_queues;
4669 name = "rx_add";
4670 break;
4671 case REQ_TX_ENTRIES_PER_SUBCRQ:
4672 req_value = &adapter->req_tx_entries_per_subcrq;
4673 name = "tx_entries_per_subcrq";
4674 break;
4675 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4676 req_value = &adapter->req_rx_add_entries_per_subcrq;
4677 name = "rx_add_entries_per_subcrq";
4678 break;
4679 case REQ_MTU:
4680 req_value = &adapter->req_mtu;
4681 name = "mtu";
4682 break;
4683 case PROMISC_REQUESTED:
4684 req_value = &adapter->promisc;
4685 name = "promisc";
4686 break;
4687 default:
4688 dev_err(dev, "Got invalid cap request rsp %d\n",
4689 crq->request_capability.capability);
4690 return;
4691 }
4692
4693 switch (crq->request_capability_rsp.rc.code) {
4694 case SUCCESS:
4695 break;
4696 case PARTIALSUCCESS:
4697 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4698 *req_value,
914789ac
LP
4699 (long)be64_to_cpu(crq->request_capability_rsp.number),
4700 name);
e7913803
JA
4701
4702 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4703 REQ_MTU) {
4704 pr_err("mtu of %llu is not supported. Reverting.\n",
4705 *req_value);
4706 *req_value = adapter->fallback.mtu;
4707 } else {
4708 *req_value =
4709 be64_to_cpu(crq->request_capability_rsp.number);
4710 }
4711
09081b9d 4712 send_request_cap(adapter, 1);
032c5e82
TF
4713 return;
4714 default:
4715 dev_err(dev, "Error %d in request cap rsp\n",
4716 crq->request_capability_rsp.rc.code);
4717 return;
4718 }
4719
4720 /* Done receiving requested capabilities, query IP offload support */
901e040a 4721 if (atomic_read(&adapter->running_cap_crqs) == 0) {
249168ad 4722 adapter->wait_capability = false;
16e811fe 4723 send_query_ip_offload(adapter);
032c5e82
TF
4724 }
4725}
4726
4727static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4728 struct ibmvnic_adapter *adapter)
4729{
4730 struct device *dev = &adapter->vdev->dev;
c26eba03 4731 struct net_device *netdev = adapter->netdev;
032c5e82
TF
4732 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4733 struct ibmvnic_login_buffer *login = adapter->login_buf;
f3ae59c0
CF
4734 u64 *tx_handle_array;
4735 u64 *rx_handle_array;
4736 int num_tx_pools;
4737 int num_rx_pools;
507ebe64 4738 u64 *size_array;
032c5e82
TF
4739 int i;
4740
76cdc5c5
SB
4741 /* CHECK: Test/set of login_pending does not need to be atomic
4742 * because only ibmvnic_tasklet tests/clears this.
4743 */
4744 if (!adapter->login_pending) {
4745 netdev_warn(netdev, "Ignoring unexpected login response\n");
4746 return 0;
4747 }
4748 adapter->login_pending = false;
4749
032c5e82 4750 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
37e40fa8 4751 DMA_TO_DEVICE);
032c5e82 4752 dma_unmap_single(dev, adapter->login_rsp_buf_token,
37e40fa8 4753 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
032c5e82 4754
498cd8e4
JA
4755 /* If the number of queues requested can't be allocated by the
4756 * server, the login response will return with code 1. We will need
4757 * to resend the login buffer with fewer queues requested.
4758 */
4759 if (login_rsp_crq->generic.rc.code) {
64d92aa2 4760 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
498cd8e4
JA
4761 complete(&adapter->init_done);
4762 return 0;
4763 }
4764
d437f5aa
SB
4765 if (adapter->failover_pending) {
4766 adapter->init_done_rc = -EAGAIN;
4767 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
4768 complete(&adapter->init_done);
4769 /* login response buffer will be released on reset */
4770 return 0;
4771 }
4772
c26eba03
JA
4773 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4774
032c5e82
TF
4775 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4776 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4777 netdev_dbg(adapter->netdev, "%016lx\n",
429aa364 4778 ((unsigned long *)(adapter->login_rsp_buf))[i]);
032c5e82
TF
4779 }
4780
4781 /* Sanity checks */
4782 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4783 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4784 adapter->req_rx_add_queues !=
4785 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4786 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
31d6b403 4787 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
032c5e82
TF
4788 return -EIO;
4789 }
507ebe64
TF
4790 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4791 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4792 /* variable buffer sizes are not supported, so just read the
4793 * first entry.
4794 */
4795 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
f3ae59c0
CF
4796
4797 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4798 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4799
4800 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4801 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4802 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4803 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4804
4805 for (i = 0; i < num_tx_pools; i++)
4806 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4807
4808 for (i = 0; i < num_rx_pools; i++)
4809 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4810
507ebe64
TF
4811 adapter->num_active_tx_scrqs = num_tx_pools;
4812 adapter->num_active_rx_scrqs = num_rx_pools;
f3ae59c0 4813 release_login_rsp_buffer(adapter);
a2c0f039 4814 release_login_buffer(adapter);
032c5e82
TF
4815 complete(&adapter->init_done);
4816
032c5e82
TF
4817 return 0;
4818}
4819
032c5e82
TF
4820static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4821 struct ibmvnic_adapter *adapter)
4822{
4823 struct device *dev = &adapter->vdev->dev;
4824 long rc;
4825
4826 rc = crq->request_unmap_rsp.rc.code;
4827 if (rc)
4828 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4829}
4830
4831static void handle_query_map_rsp(union ibmvnic_crq *crq,
4832 struct ibmvnic_adapter *adapter)
4833{
4834 struct net_device *netdev = adapter->netdev;
4835 struct device *dev = &adapter->vdev->dev;
4836 long rc;
4837
4838 rc = crq->query_map_rsp.rc.code;
4839 if (rc) {
4840 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4841 return;
4842 }
4843 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4844 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4845 crq->query_map_rsp.free_pages);
4846}
4847
4848static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4849 struct ibmvnic_adapter *adapter)
4850{
4851 struct net_device *netdev = adapter->netdev;
4852 struct device *dev = &adapter->vdev->dev;
4853 long rc;
4854
901e040a 4855 atomic_dec(&adapter->running_cap_crqs);
032c5e82 4856 netdev_dbg(netdev, "Outstanding queries: %d\n",
901e040a 4857 atomic_read(&adapter->running_cap_crqs));
032c5e82
TF
4858 rc = crq->query_capability.rc.code;
4859 if (rc) {
4860 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4861 goto out;
4862 }
4863
4864 switch (be16_to_cpu(crq->query_capability.capability)) {
4865 case MIN_TX_QUEUES:
4866 adapter->min_tx_queues =
de89e854 4867 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4868 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4869 adapter->min_tx_queues);
4870 break;
4871 case MIN_RX_QUEUES:
4872 adapter->min_rx_queues =
de89e854 4873 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4874 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4875 adapter->min_rx_queues);
4876 break;
4877 case MIN_RX_ADD_QUEUES:
4878 adapter->min_rx_add_queues =
de89e854 4879 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4880 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4881 adapter->min_rx_add_queues);
4882 break;
4883 case MAX_TX_QUEUES:
4884 adapter->max_tx_queues =
de89e854 4885 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4886 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4887 adapter->max_tx_queues);
4888 break;
4889 case MAX_RX_QUEUES:
4890 adapter->max_rx_queues =
de89e854 4891 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4892 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4893 adapter->max_rx_queues);
4894 break;
4895 case MAX_RX_ADD_QUEUES:
4896 adapter->max_rx_add_queues =
de89e854 4897 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4898 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4899 adapter->max_rx_add_queues);
4900 break;
4901 case MIN_TX_ENTRIES_PER_SUBCRQ:
4902 adapter->min_tx_entries_per_subcrq =
de89e854 4903 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4904 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4905 adapter->min_tx_entries_per_subcrq);
4906 break;
4907 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4908 adapter->min_rx_add_entries_per_subcrq =
de89e854 4909 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4910 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4911 adapter->min_rx_add_entries_per_subcrq);
4912 break;
4913 case MAX_TX_ENTRIES_PER_SUBCRQ:
4914 adapter->max_tx_entries_per_subcrq =
de89e854 4915 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4916 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4917 adapter->max_tx_entries_per_subcrq);
4918 break;
4919 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4920 adapter->max_rx_add_entries_per_subcrq =
de89e854 4921 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4922 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4923 adapter->max_rx_add_entries_per_subcrq);
4924 break;
4925 case TCP_IP_OFFLOAD:
4926 adapter->tcp_ip_offload =
de89e854 4927 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4928 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4929 adapter->tcp_ip_offload);
4930 break;
4931 case PROMISC_SUPPORTED:
4932 adapter->promisc_supported =
de89e854 4933 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4934 netdev_dbg(netdev, "promisc_supported = %lld\n",
4935 adapter->promisc_supported);
4936 break;
4937 case MIN_MTU:
de89e854 4938 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
f39f0d1e 4939 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
032c5e82
TF
4940 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4941 break;
4942 case MAX_MTU:
de89e854 4943 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
f39f0d1e 4944 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
032c5e82
TF
4945 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4946 break;
4947 case MAX_MULTICAST_FILTERS:
4948 adapter->max_multicast_filters =
de89e854 4949 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4950 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4951 adapter->max_multicast_filters);
4952 break;
4953 case VLAN_HEADER_INSERTION:
4954 adapter->vlan_header_insertion =
de89e854 4955 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4956 if (adapter->vlan_header_insertion)
4957 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4958 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4959 adapter->vlan_header_insertion);
4960 break;
6052d5e2
MFV
4961 case RX_VLAN_HEADER_INSERTION:
4962 adapter->rx_vlan_header_insertion =
4963 be64_to_cpu(crq->query_capability.number);
4964 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4965 adapter->rx_vlan_header_insertion);
4966 break;
032c5e82
TF
4967 case MAX_TX_SG_ENTRIES:
4968 adapter->max_tx_sg_entries =
de89e854 4969 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4970 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4971 adapter->max_tx_sg_entries);
4972 break;
4973 case RX_SG_SUPPORTED:
4974 adapter->rx_sg_supported =
de89e854 4975 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4976 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4977 adapter->rx_sg_supported);
4978 break;
4979 case OPT_TX_COMP_SUB_QUEUES:
4980 adapter->opt_tx_comp_sub_queues =
de89e854 4981 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4982 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4983 adapter->opt_tx_comp_sub_queues);
4984 break;
4985 case OPT_RX_COMP_QUEUES:
4986 adapter->opt_rx_comp_queues =
de89e854 4987 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4988 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4989 adapter->opt_rx_comp_queues);
4990 break;
4991 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4992 adapter->opt_rx_bufadd_q_per_rx_comp_q =
de89e854 4993 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
4994 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4995 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4996 break;
4997 case OPT_TX_ENTRIES_PER_SUBCRQ:
4998 adapter->opt_tx_entries_per_subcrq =
de89e854 4999 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
5000 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5001 adapter->opt_tx_entries_per_subcrq);
5002 break;
5003 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5004 adapter->opt_rxba_entries_per_subcrq =
de89e854 5005 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
5006 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5007 adapter->opt_rxba_entries_per_subcrq);
5008 break;
5009 case TX_RX_DESC_REQ:
5010 adapter->tx_rx_desc_req = crq->query_capability.number;
5011 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5012 adapter->tx_rx_desc_req);
5013 break;
5014
5015 default:
5016 netdev_err(netdev, "Got invalid cap rsp %d\n",
5017 crq->query_capability.capability);
5018 }
5019
5020out:
249168ad
TF
5021 if (atomic_read(&adapter->running_cap_crqs) == 0) {
5022 adapter->wait_capability = false;
09081b9d 5023 send_request_cap(adapter, 0);
249168ad 5024 }
032c5e82
TF
5025}
5026
f8d6ae0d
MFV
5027static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5028{
5029 union ibmvnic_crq crq;
5030 int rc;
5031
5032 memset(&crq, 0, sizeof(crq));
5033 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5034 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
ff25dcb9
TF
5035
5036 mutex_lock(&adapter->fw_lock);
5037 adapter->fw_done_rc = 0;
070eca95 5038 reinit_completion(&adapter->fw_done);
ff25dcb9 5039
f8d6ae0d 5040 rc = ibmvnic_send_crq(adapter, &crq);
ff25dcb9
TF
5041 if (rc) {
5042 mutex_unlock(&adapter->fw_lock);
f8d6ae0d 5043 return rc;
ff25dcb9 5044 }
476d96ca
TF
5045
5046 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
ff25dcb9
TF
5047 if (rc) {
5048 mutex_unlock(&adapter->fw_lock);
476d96ca 5049 return rc;
ff25dcb9 5050 }
476d96ca 5051
ff25dcb9 5052 mutex_unlock(&adapter->fw_lock);
f8d6ae0d
MFV
5053 return adapter->fw_done_rc ? -EIO : 0;
5054}
5055
5056static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5057 struct ibmvnic_adapter *adapter)
5058{
5059 struct net_device *netdev = adapter->netdev;
5060 int rc;
dd0f9d89 5061 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
f8d6ae0d
MFV
5062
5063 rc = crq->query_phys_parms_rsp.rc.code;
5064 if (rc) {
5065 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5066 return rc;
5067 }
dd0f9d89 5068 switch (rspeed) {
f8d6ae0d
MFV
5069 case IBMVNIC_10MBPS:
5070 adapter->speed = SPEED_10;
5071 break;
5072 case IBMVNIC_100MBPS:
5073 adapter->speed = SPEED_100;
5074 break;
5075 case IBMVNIC_1GBPS:
5076 adapter->speed = SPEED_1000;
5077 break;
b9cd795b 5078 case IBMVNIC_10GBPS:
f8d6ae0d
MFV
5079 adapter->speed = SPEED_10000;
5080 break;
5081 case IBMVNIC_25GBPS:
5082 adapter->speed = SPEED_25000;
5083 break;
5084 case IBMVNIC_40GBPS:
5085 adapter->speed = SPEED_40000;
5086 break;
5087 case IBMVNIC_50GBPS:
5088 adapter->speed = SPEED_50000;
5089 break;
5090 case IBMVNIC_100GBPS:
5091 adapter->speed = SPEED_100000;
5092 break;
b9cd795b
LP
5093 case IBMVNIC_200GBPS:
5094 adapter->speed = SPEED_200000;
5095 break;
f8d6ae0d 5096 default:
dd0f9d89
MFV
5097 if (netif_carrier_ok(netdev))
5098 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
f8d6ae0d
MFV
5099 adapter->speed = SPEED_UNKNOWN;
5100 }
5101 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5102 adapter->duplex = DUPLEX_FULL;
5103 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5104 adapter->duplex = DUPLEX_HALF;
5105 else
5106 adapter->duplex = DUPLEX_UNKNOWN;
5107
5108 return rc;
5109}
5110
032c5e82
TF
5111static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5112 struct ibmvnic_adapter *adapter)
5113{
5114 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5115 struct net_device *netdev = adapter->netdev;
5116 struct device *dev = &adapter->vdev->dev;
993a82b0 5117 u64 *u64_crq = (u64 *)crq;
032c5e82
TF
5118 long rc;
5119
5120 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
429aa364
LP
5121 (unsigned long)cpu_to_be64(u64_crq[0]),
5122 (unsigned long)cpu_to_be64(u64_crq[1]));
032c5e82
TF
5123 switch (gen_crq->first) {
5124 case IBMVNIC_CRQ_INIT_RSP:
5125 switch (gen_crq->cmd) {
5126 case IBMVNIC_CRQ_INIT:
5127 dev_info(dev, "Partner initialized\n");
017892c1 5128 adapter->from_passive_init = true;
76cdc5c5
SB
5129 /* Discard any stale login responses from prev reset.
5130 * CHECK: should we clear even on INIT_COMPLETE?
5131 */
5132 adapter->login_pending = false;
5133
53f8b1b2
CF
5134 if (adapter->state == VNIC_DOWN)
5135 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5136 else
5137 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5138
ef66a1ea
SB
5139 if (rc && rc != -EBUSY) {
5140 /* We were unable to schedule the failover
5141 * reset either because the adapter was still
5142 * probing (eg: during kexec) or we could not
5143 * allocate memory. Clear the failover_pending
5144 * flag since no one else will. We ignore
5145 * EBUSY because it means either FAILOVER reset
5146 * is already scheduled or the adapter is
5147 * being removed.
5148 */
5149 netdev_err(netdev,
5150 "Error %ld scheduling failover reset\n",
5151 rc);
5152 adapter->failover_pending = false;
5153 }
08d6b7a7
SB
5154
5155 if (!completion_done(&adapter->init_done)) {
08d6b7a7
SB
5156 if (!adapter->init_done_rc)
5157 adapter->init_done_rc = -EAGAIN;
fc07be5b 5158 complete(&adapter->init_done);
08d6b7a7
SB
5159 }
5160
032c5e82
TF
5161 break;
5162 case IBMVNIC_CRQ_INIT_COMPLETE:
5163 dev_info(dev, "Partner initialization complete\n");
5153698e 5164 adapter->crq.active = true;
032c5e82
TF
5165 send_version_xchg(adapter);
5166 break;
5167 default:
5168 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5169 }
5170 return;
5171 case IBMVNIC_CRQ_XPORT_EVENT:
ed651a10 5172 netif_carrier_off(netdev);
5153698e 5173 adapter->crq.active = false;
2147e3d0
TF
5174 /* terminate any thread waiting for a response
5175 * from the device
5176 */
5177 if (!completion_done(&adapter->fw_done)) {
5178 adapter->fw_done_rc = -EIO;
5179 complete(&adapter->fw_done);
5180 }
42d36f09
SB
5181
5182 /* if we got here during crq-init, retry crq-init */
5183 if (!completion_done(&adapter->init_done)) {
5184 adapter->init_done_rc = -EAGAIN;
5185 complete(&adapter->init_done);
5186 }
5187
2147e3d0
TF
5188 if (!completion_done(&adapter->stats_done))
5189 complete(&adapter->stats_done);
7ed5b31f 5190 if (test_bit(0, &adapter->resetting))
2770a798 5191 adapter->force_reset_recovery = true;
032c5e82 5192 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
ed651a10
NF
5193 dev_info(dev, "Migrated, re-enabling adapter\n");
5194 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
dfad09a6
TF
5195 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5196 dev_info(dev, "Backing device failover detected\n");
5a18e1e0 5197 adapter->failover_pending = true;
032c5e82
TF
5198 } else {
5199 /* The adapter lost the connection */
5200 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5201 gen_crq->cmd);
ed651a10 5202 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
032c5e82
TF
5203 }
5204 return;
5205 case IBMVNIC_CRQ_CMD_RSP:
5206 break;
5207 default:
5208 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5209 gen_crq->first);
5210 return;
5211 }
5212
5213 switch (gen_crq->cmd) {
5214 case VERSION_EXCHANGE_RSP:
5215 rc = crq->version_exchange_rsp.rc.code;
5216 if (rc) {
5217 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5218 break;
5219 }
78468899 5220 ibmvnic_version =
032c5e82 5221 be16_to_cpu(crq->version_exchange_rsp.version);
78468899
TF
5222 dev_info(dev, "Partner protocol version is %d\n",
5223 ibmvnic_version);
491099ad 5224 send_query_cap(adapter);
032c5e82
TF
5225 break;
5226 case QUERY_CAPABILITY_RSP:
5227 handle_query_cap_rsp(crq, adapter);
5228 break;
5229 case QUERY_MAP_RSP:
5230 handle_query_map_rsp(crq, adapter);
5231 break;
5232 case REQUEST_MAP_RSP:
f3be0cbc
TF
5233 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5234 complete(&adapter->fw_done);
032c5e82
TF
5235 break;
5236 case REQUEST_UNMAP_RSP:
5237 handle_request_unmap_rsp(crq, adapter);
5238 break;
5239 case REQUEST_CAPABILITY_RSP:
5240 handle_request_cap_rsp(crq, adapter);
5241 break;
5242 case LOGIN_RSP:
5243 netdev_dbg(netdev, "Got Login Response\n");
5244 handle_login_rsp(crq, adapter);
5245 break;
5246 case LOGICAL_LINK_STATE_RSP:
53da09e9
NF
5247 netdev_dbg(netdev,
5248 "Got Logical Link State Response, state: %d rc: %d\n",
5249 crq->logical_link_state_rsp.link_state,
5250 crq->logical_link_state_rsp.rc.code);
032c5e82
TF
5251 adapter->logical_link_state =
5252 crq->logical_link_state_rsp.link_state;
53da09e9
NF
5253 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5254 complete(&adapter->init_done);
032c5e82
TF
5255 break;
5256 case LINK_STATE_INDICATION:
5257 netdev_dbg(netdev, "Got Logical Link State Indication\n");
5258 adapter->phys_link_state =
5259 crq->link_state_indication.phys_link_state;
5260 adapter->logical_link_state =
5261 crq->link_state_indication.logical_link_state;
0655f994
TF
5262 if (adapter->phys_link_state && adapter->logical_link_state)
5263 netif_carrier_on(netdev);
5264 else
5265 netif_carrier_off(netdev);
032c5e82
TF
5266 break;
5267 case CHANGE_MAC_ADDR_RSP:
5268 netdev_dbg(netdev, "Got MAC address change Response\n");
f813614f 5269 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
032c5e82
TF
5270 break;
5271 case ERROR_INDICATION:
5272 netdev_dbg(netdev, "Got Error Indication\n");
5273 handle_error_indication(crq, adapter);
5274 break;
032c5e82
TF
5275 case REQUEST_STATISTICS_RSP:
5276 netdev_dbg(netdev, "Got Statistics Response\n");
5277 complete(&adapter->stats_done);
5278 break;
032c5e82
TF
5279 case QUERY_IP_OFFLOAD_RSP:
5280 netdev_dbg(netdev, "Got Query IP offload Response\n");
5281 handle_query_ip_offload_rsp(adapter);
5282 break;
5283 case MULTICAST_CTRL_RSP:
5284 netdev_dbg(netdev, "Got multicast control Response\n");
5285 break;
5286 case CONTROL_IP_OFFLOAD_RSP:
5287 netdev_dbg(netdev, "Got Control IP offload Response\n");
5288 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5289 sizeof(adapter->ip_offload_ctrl),
5290 DMA_TO_DEVICE);
bd0b6723 5291 complete(&adapter->init_done);
032c5e82 5292 break;
032c5e82
TF
5293 case COLLECT_FW_TRACE_RSP:
5294 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5295 complete(&adapter->fw_done);
5296 break;
4e6759be
DANR
5297 case GET_VPD_SIZE_RSP:
5298 handle_vpd_size_rsp(crq, adapter);
5299 break;
5300 case GET_VPD_RSP:
5301 handle_vpd_rsp(crq, adapter);
5302 break;
f8d6ae0d
MFV
5303 case QUERY_PHYS_PARMS_RSP:
5304 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5305 complete(&adapter->fw_done);
5306 break;
032c5e82
TF
5307 default:
5308 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5309 gen_crq->cmd);
5310 }
5311}
5312
5313static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5314{
5315 struct ibmvnic_adapter *adapter = instance;
6c267b3d 5316
6c267b3d 5317 tasklet_schedule(&adapter->tasklet);
6c267b3d
TF
5318 return IRQ_HANDLED;
5319}
5320
aa7c3fee 5321static void ibmvnic_tasklet(struct tasklet_struct *t)
6c267b3d 5322{
aa7c3fee 5323 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
032c5e82 5324 struct ibmvnic_crq_queue *queue = &adapter->crq;
032c5e82
TF
5325 union ibmvnic_crq *crq;
5326 unsigned long flags;
5327 bool done = false;
5328
5329 spin_lock_irqsave(&queue->lock, flags);
032c5e82
TF
5330 while (!done) {
5331 /* Pull all the valid messages off the CRQ */
5332 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
e41aec79
LP
5333 /* This barrier makes sure ibmvnic_next_crq()'s
5334 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
5335 * before ibmvnic_handle_crq()'s
5336 * switch(gen_crq->first) and switch(gen_crq->cmd).
5337 */
5338 dma_rmb();
032c5e82
TF
5339 ibmvnic_handle_crq(crq, adapter);
5340 crq->generic.first = 0;
5341 }
032c5e82 5342 }
249168ad
TF
5343 /* if capabilities CRQ's were sent in this tasklet, the following
5344 * tasklet must wait until all responses are received
5345 */
5346 if (atomic_read(&adapter->running_cap_crqs) != 0)
5347 adapter->wait_capability = true;
032c5e82 5348 spin_unlock_irqrestore(&queue->lock, flags);
032c5e82
TF
5349}
5350
5351static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5352{
5353 struct vio_dev *vdev = adapter->vdev;
5354 int rc;
5355
5356 do {
5357 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5358 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5359
5360 if (rc)
5361 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5362
5363 return rc;
5364}
5365
5366static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5367{
5368 struct ibmvnic_crq_queue *crq = &adapter->crq;
5369 struct device *dev = &adapter->vdev->dev;
5370 struct vio_dev *vdev = adapter->vdev;
5371 int rc;
5372
5373 /* Close the CRQ */
5374 do {
5375 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5376 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5377
5378 /* Clean out the queue */
0e435bef
LP
5379 if (!crq->msgs)
5380 return -EINVAL;
5381
032c5e82
TF
5382 memset(crq->msgs, 0, PAGE_SIZE);
5383 crq->cur = 0;
5153698e 5384 crq->active = false;
032c5e82
TF
5385
5386 /* And re-open it again */
5387 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5388 crq->msg_token, PAGE_SIZE);
5389
5390 if (rc == H_CLOSED)
5391 /* Adapter is good, but other end is not ready */
5392 dev_warn(dev, "Partner adapter not ready\n");
5393 else if (rc != 0)
5394 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5395
5396 return rc;
5397}
5398
f992887c 5399static void release_crq_queue(struct ibmvnic_adapter *adapter)
032c5e82
TF
5400{
5401 struct ibmvnic_crq_queue *crq = &adapter->crq;
5402 struct vio_dev *vdev = adapter->vdev;
5403 long rc;
5404
f992887c
NF
5405 if (!crq->msgs)
5406 return;
5407
032c5e82
TF
5408 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5409 free_irq(vdev->irq, adapter);
6c267b3d 5410 tasklet_kill(&adapter->tasklet);
032c5e82
TF
5411 do {
5412 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5413 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5414
5415 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5416 DMA_BIDIRECTIONAL);
5417 free_page((unsigned long)crq->msgs);
f992887c 5418 crq->msgs = NULL;
5153698e 5419 crq->active = false;
032c5e82
TF
5420}
5421
f992887c 5422static int init_crq_queue(struct ibmvnic_adapter *adapter)
032c5e82
TF
5423{
5424 struct ibmvnic_crq_queue *crq = &adapter->crq;
5425 struct device *dev = &adapter->vdev->dev;
5426 struct vio_dev *vdev = adapter->vdev;
5427 int rc, retrc = -ENOMEM;
5428
f992887c
NF
5429 if (crq->msgs)
5430 return 0;
5431
032c5e82
TF
5432 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5433 /* Should we allocate more than one page? */
5434
5435 if (!crq->msgs)
5436 return -ENOMEM;
5437
5438 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5439 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5440 DMA_BIDIRECTIONAL);
5441 if (dma_mapping_error(dev, crq->msg_token))
5442 goto map_failed;
5443
5444 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5445 crq->msg_token, PAGE_SIZE);
5446
5447 if (rc == H_RESOURCE)
5448 /* maybe kexecing and resource is busy. try a reset */
5449 rc = ibmvnic_reset_crq(adapter);
5450 retrc = rc;
5451
5452 if (rc == H_CLOSED) {
5453 dev_warn(dev, "Partner adapter not ready\n");
5454 } else if (rc) {
5455 dev_warn(dev, "Error %d opening adapter\n", rc);
5456 goto reg_crq_failed;
5457 }
5458
5459 retrc = 0;
5460
aa7c3fee 5461 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6c267b3d 5462
032c5e82 5463 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
e56e2515
MFV
5464 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5465 adapter->vdev->unit_address);
5466 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
032c5e82
TF
5467 if (rc) {
5468 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5469 vdev->irq, rc);
5470 goto req_irq_failed;
5471 }
5472
5473 rc = vio_enable_interrupts(vdev);
5474 if (rc) {
5475 dev_err(dev, "Error %d enabling interrupts\n", rc);
5476 goto req_irq_failed;
5477 }
5478
5479 crq->cur = 0;
5480 spin_lock_init(&crq->lock);
5481
8c5ab1e8
SB
5482 /* process any CRQs that were queued before we enabled interrupts */
5483 tasklet_schedule(&adapter->tasklet);
5484
032c5e82
TF
5485 return retrc;
5486
5487req_irq_failed:
6c267b3d 5488 tasklet_kill(&adapter->tasklet);
032c5e82
TF
5489 do {
5490 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5491 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5492reg_crq_failed:
5493 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5494map_failed:
5495 free_page((unsigned long)crq->msgs);
f992887c 5496 crq->msgs = NULL;
032c5e82
TF
5497 return retrc;
5498}
5499
635e442f 5500static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
032c5e82 5501{
f6ef6408 5502 struct device *dev = &adapter->vdev->dev;
98c41f04 5503 unsigned long timeout = msecs_to_jiffies(20000);
6881b07f
MS
5504 u64 old_num_rx_queues = adapter->req_rx_queues;
5505 u64 old_num_tx_queues = adapter->req_tx_queues;
f6ef6408
JA
5506 int rc;
5507
017892c1
JA
5508 adapter->from_passive_init = false;
5509
6881b07f 5510 if (reset)
635e442f 5511 reinit_completion(&adapter->init_done);
d7c0ef36 5512
6a2fb0e9 5513 adapter->init_done_rc = 0;
fa68bfab
LP
5514 rc = ibmvnic_send_crq_init(adapter);
5515 if (rc) {
5516 dev_err(dev, "Send crq init failed with error %d\n", rc);
5517 return rc;
5518 }
5519
f6ef6408
JA
5520 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5521 dev_err(dev, "Initialization sequence timed out\n");
017892c1
JA
5522 return -1;
5523 }
5524
6a2fb0e9
NF
5525 if (adapter->init_done_rc) {
5526 release_crq_queue(adapter);
5527 return adapter->init_done_rc;
5528 }
5529
785a2b10
LP
5530 if (adapter->from_passive_init) {
5531 adapter->state = VNIC_OPEN;
5532 adapter->from_passive_init = false;
5533 return -1;
5534 }
5535
635e442f
LP
5536 if (reset &&
5537 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
30f79625 5538 adapter->reset_reason != VNIC_RESET_MOBILITY) {
d7c0ef36
NF
5539 if (adapter->req_rx_queues != old_num_rx_queues ||
5540 adapter->req_tx_queues != old_num_tx_queues) {
5541 release_sub_crqs(adapter, 0);
5542 rc = init_sub_crqs(adapter);
5543 } else {
5544 rc = reset_sub_crq_queues(adapter);
5545 }
5546 } else {
57a49436 5547 rc = init_sub_crqs(adapter);
d7c0ef36
NF
5548 }
5549
1bb3c739
NF
5550 if (rc) {
5551 dev_err(dev, "Initialization of sub crqs failed\n");
5552 release_crq_queue(adapter);
5df969c3
TF
5553 return rc;
5554 }
5555
5556 rc = init_sub_crq_irqs(adapter);
5557 if (rc) {
5558 dev_err(dev, "Failed to initialize sub crq irqs\n");
5559 release_crq_queue(adapter);
1bb3c739
NF
5560 }
5561
5562 return rc;
f6ef6408
JA
5563}
5564
40c9db8a
TF
5565static struct device_attribute dev_attr_failover;
5566
f6ef6408
JA
5567static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5568{
032c5e82
TF
5569 struct ibmvnic_adapter *adapter;
5570 struct net_device *netdev;
5571 unsigned char *mac_addr_p;
53f8b1b2 5572 bool init_success;
032c5e82
TF
5573 int rc;
5574
5575 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5576 dev->unit_address);
5577
5578 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5579 VETH_MAC_ADDR, NULL);
5580 if (!mac_addr_p) {
5581 dev_err(&dev->dev,
5582 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5583 __FILE__, __LINE__);
5584 return 0;
5585 }
5586
5587 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
d45cc3a4 5588 IBMVNIC_MAX_QUEUES);
032c5e82
TF
5589 if (!netdev)
5590 return -ENOMEM;
5591
5592 adapter = netdev_priv(netdev);
90c8014c 5593 adapter->state = VNIC_PROBING;
032c5e82
TF
5594 dev_set_drvdata(&dev->dev, netdev);
5595 adapter->vdev = dev;
5596 adapter->netdev = netdev;
76cdc5c5 5597 adapter->login_pending = false;
032c5e82
TF
5598
5599 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5600 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5601 netdev->irq = dev->irq;
5602 netdev->netdev_ops = &ibmvnic_netdev_ops;
5603 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5604 SET_NETDEV_DEV(netdev, &dev->dev);
5605
ed651a10 5606 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
7ed5b31f
JK
5607 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5608 __ibmvnic_delayed_reset);
ed651a10 5609 INIT_LIST_HEAD(&adapter->rwi_list);
6c5c7489 5610 spin_lock_init(&adapter->rwi_lock);
7d7195a0 5611 spin_lock_init(&adapter->state_lock);
ff25dcb9 5612 mutex_init(&adapter->fw_lock);
bbd669a8 5613 init_completion(&adapter->init_done);
070eca95
TF
5614 init_completion(&adapter->fw_done);
5615 init_completion(&adapter->reset_done);
5616 init_completion(&adapter->stats_done);
7ed5b31f 5617 clear_bit(0, &adapter->resetting);
ed651a10 5618
53f8b1b2 5619 init_success = false;
6a2fb0e9 5620 do {
30f79625
NF
5621 rc = init_crq_queue(adapter);
5622 if (rc) {
5623 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5624 rc);
5625 goto ibmvnic_init_fail;
5626 }
5627
635e442f 5628 rc = ibmvnic_reset_init(adapter, false);
08d6b7a7 5629 } while (rc == -EAGAIN);
032c5e82 5630
53f8b1b2
CF
5631 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
5632 * partner is not ready. CRQ is not active. When the partner becomes
5633 * ready, we will do the passive init reset.
5634 */
5635
5636 if (!rc)
5637 init_success = true;
5638
07184213
TF
5639 rc = init_stats_buffers(adapter);
5640 if (rc)
5641 goto ibmvnic_init_fail;
5642
5643 rc = init_stats_token(adapter);
5644 if (rc)
5645 goto ibmvnic_stats_fail;
5646
40c9db8a 5647 rc = device_create_file(&dev->dev, &dev_attr_failover);
7c1885ae 5648 if (rc)
07184213 5649 goto ibmvnic_dev_file_err;
40c9db8a 5650
e876a8a7 5651 netif_carrier_off(netdev);
032c5e82 5652
53f8b1b2
CF
5653 if (init_success) {
5654 adapter->state = VNIC_PROBED;
5655 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5656 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5657 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5658 } else {
5659 adapter->state = VNIC_DOWN;
5660 }
c26eba03
JA
5661
5662 adapter->wait_for_reset = false;
a86d5c68 5663 adapter->last_reset_time = jiffies;
60353779
SB
5664
5665 rc = register_netdev(netdev);
5666 if (rc) {
5667 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5668 goto ibmvnic_register_fail;
5669 }
5670 dev_info(&dev->dev, "ibmvnic registered\n");
5671
032c5e82 5672 return 0;
7c1885ae
NF
5673
5674ibmvnic_register_fail:
5675 device_remove_file(&dev->dev, &dev_attr_failover);
5676
07184213
TF
5677ibmvnic_dev_file_err:
5678 release_stats_token(adapter);
5679
5680ibmvnic_stats_fail:
5681 release_stats_buffers(adapter);
5682
7c1885ae 5683ibmvnic_init_fail:
d7c0ef36 5684 release_sub_crqs(adapter, 1);
7c1885ae 5685 release_crq_queue(adapter);
ff25dcb9 5686 mutex_destroy(&adapter->fw_lock);
7c1885ae
NF
5687 free_netdev(netdev);
5688
5689 return rc;
032c5e82
TF
5690}
5691
386a966f 5692static void ibmvnic_remove(struct vio_dev *dev)
032c5e82
TF
5693{
5694 struct net_device *netdev = dev_get_drvdata(&dev->dev);
37489055 5695 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
7d7195a0
JK
5696 unsigned long flags;
5697
5698 spin_lock_irqsave(&adapter->state_lock, flags);
4a41c421
SB
5699
5700 /* If ibmvnic_reset() is scheduling a reset, wait for it to
5701 * finish. Then, set the state to REMOVING to prevent it from
5702 * scheduling any more work and to have reset functions ignore
5703 * any resets that have already been scheduled. Drop the lock
5704 * after setting state, so __ibmvnic_reset() which is called
5705 * from the flush_work() below, can make progress.
5706 */
69cdb794 5707 spin_lock(&adapter->rwi_lock);
90c8014c 5708 adapter->state = VNIC_REMOVING;
69cdb794 5709 spin_unlock(&adapter->rwi_lock);
4a41c421 5710
7d7195a0
JK
5711 spin_unlock_irqrestore(&adapter->state_lock, flags);
5712
6954a9e4
TF
5713 flush_work(&adapter->ibmvnic_reset);
5714 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5715
a5681e20
JK
5716 rtnl_lock();
5717 unregister_netdevice(netdev);
37489055
NF
5718
5719 release_resources(adapter);
d7c0ef36 5720 release_sub_crqs(adapter, 1);
37489055
NF
5721 release_crq_queue(adapter);
5722
53cc7721
TF
5723 release_stats_token(adapter);
5724 release_stats_buffers(adapter);
5725
90c8014c
NF
5726 adapter->state = VNIC_REMOVED;
5727
a5681e20 5728 rtnl_unlock();
ff25dcb9 5729 mutex_destroy(&adapter->fw_lock);
40c9db8a 5730 device_remove_file(&dev->dev, &dev_attr_failover);
032c5e82
TF
5731 free_netdev(netdev);
5732 dev_set_drvdata(&dev->dev, NULL);
032c5e82
TF
5733}
5734
40c9db8a
TF
5735static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5736 const char *buf, size_t count)
5737{
5738 struct net_device *netdev = dev_get_drvdata(dev);
5739 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5740 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5741 __be64 session_token;
5742 long rc;
5743
5744 if (!sysfs_streq(buf, "1"))
5745 return -EINVAL;
5746
5747 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5748 H_GET_SESSION_TOKEN, 0, 0, 0);
5749 if (rc) {
5750 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5751 rc);
334c4241 5752 goto last_resort;
40c9db8a
TF
5753 }
5754
5755 session_token = (__be64)retbuf[0];
5756 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5757 be64_to_cpu(session_token));
5758 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5759 H_SESSION_ERR_DETECTED, session_token, 0, 0);
a8a0d5e8 5760 if (rc) {
334c4241
LP
5761 netdev_err(netdev,
5762 "H_VIOCTL initiated failover failed, rc %ld\n",
40c9db8a 5763 rc);
a8a0d5e8
SB
5764 goto last_resort;
5765 }
5766
5767 return count;
334c4241
LP
5768
5769last_resort:
5770 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
5771 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
40c9db8a
TF
5772
5773 return count;
5774}
6cbaefb4 5775static DEVICE_ATTR_WO(failover);
40c9db8a 5776
032c5e82
TF
5777static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5778{
5779 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5780 struct ibmvnic_adapter *adapter;
5781 struct iommu_table *tbl;
5782 unsigned long ret = 0;
5783 int i;
5784
5785 tbl = get_iommu_table_base(&vdev->dev);
5786
5787 /* netdev inits at probe time along with the structures we need below*/
5788 if (!netdev)
5789 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5790
5791 adapter = netdev_priv(netdev);
5792
5793 ret += PAGE_SIZE; /* the crq message queue */
032c5e82
TF
5794 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5795
5796 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5797 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5798
507ebe64 5799 for (i = 0; i < adapter->num_active_rx_pools; i++)
032c5e82
TF
5800 ret += adapter->rx_pool[i].size *
5801 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5802
5803 return ret;
5804}
5805
5806static int ibmvnic_resume(struct device *dev)
5807{
5808 struct net_device *netdev = dev_get_drvdata(dev);
5809 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
032c5e82 5810
cb89ba24
JA
5811 if (adapter->state != VNIC_OPEN)
5812 return 0;
5813
a248878d 5814 tasklet_schedule(&adapter->tasklet);
032c5e82
TF
5815
5816 return 0;
5817}
5818
8c37bc67 5819static const struct vio_device_id ibmvnic_device_table[] = {
032c5e82
TF
5820 {"network", "IBM,vnic"},
5821 {"", "" }
5822};
5823MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5824
5825static const struct dev_pm_ops ibmvnic_pm_ops = {
5826 .resume = ibmvnic_resume
5827};
5828
5829static struct vio_driver ibmvnic_driver = {
5830 .id_table = ibmvnic_device_table,
5831 .probe = ibmvnic_probe,
5832 .remove = ibmvnic_remove,
5833 .get_desired_dma = ibmvnic_get_desired_dma,
5834 .name = ibmvnic_driver_name,
5835 .pm = &ibmvnic_pm_ops,
5836};
5837
5838/* module functions */
5839static int __init ibmvnic_module_init(void)
5840{
5841 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5842 IBMVNIC_DRIVER_VERSION);
5843
5844 return vio_register_driver(&ibmvnic_driver);
5845}
5846
5847static void __exit ibmvnic_module_exit(void)
5848{
5849 vio_unregister_driver(&ibmvnic_driver);
5850}
5851
5852module_init(ibmvnic_module_init);
5853module_exit(ibmvnic_module_exit);