1/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
40/* "long term mapping". This entails that large, continuous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
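/* A sketch of the long-term mapping flow described above, in terms of the
 * helpers defined later in this file (illustrative, not additional logic):
 * alloc_long_term_buff() obtains a large dma_alloc_coherent() region and
 * registers it with the VNIC server via send_request_map(); the transmit and
 * receive paths then copy skb data to and from fixed offsets within that
 * region, and free_long_term_buff() issues send_request_unmap() before the
 * memory is released.
 */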
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
64#include <linux/ipv6.h>
65#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
68#include <linux/debugfs.h>
69#include <linux/interrupt.h>
70#include <net/net_namespace.h>
71#include <asm/hvcall.h>
72#include <linux/atomic.h>
73#include <asm/vio.h>
74#include <asm/iommu.h>
75#include <linux/uaccess.h>
76#include <asm/firmware.h>
77#include <linux/workqueue.h>
78
79#include "ibmvnic.h"
80
81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90static int ibmvnic_remove(struct vio_dev *);
91static void release_sub_crqs(struct ibmvnic_adapter *);
92static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
93static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
94static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
95static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
96static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
97static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
98 union sub_crq *sub_crq);
99static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
100static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
101static int enable_scrq_irq(struct ibmvnic_adapter *,
102 struct ibmvnic_sub_crq_queue *);
103static int disable_scrq_irq(struct ibmvnic_adapter *,
104 struct ibmvnic_sub_crq_queue *);
105static int pending_scrq(struct ibmvnic_adapter *,
106 struct ibmvnic_sub_crq_queue *);
107static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
108 struct ibmvnic_sub_crq_queue *);
109static int ibmvnic_poll(struct napi_struct *napi, int budget);
110static void send_map_query(struct ibmvnic_adapter *adapter);
111static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
112static void send_request_unmap(struct ibmvnic_adapter *, u8);
113
114struct ibmvnic_stat {
115 char name[ETH_GSTRING_LEN];
116 int offset;
117};
118
119#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
120 offsetof(struct ibmvnic_statistics, stat))
121#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
122
123static const struct ibmvnic_stat ibmvnic_stats[] = {
124 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
125 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
126 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
127 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
128 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
129 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
130 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
131 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
132 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
133 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
134 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
135 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
136 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
137 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
138 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
139 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
140 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
141 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
142 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
143 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
144 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
145 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
146};
147
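/* Wrapper for the H_REG_SUB_CRQ hcall: registers a sub-CRQ message page with
 * the hypervisor and returns the assigned queue number and interrupt source
 * from the hcall return buffer.
 */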
148static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
149 unsigned long length, unsigned long *number,
150 unsigned long *irq)
151{
152 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
153 long rc;
154
155 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
156 *number = retbuf[0];
157 *irq = retbuf[1];
158
159 return rc;
160}
161
162/* net_device_ops functions */
163
164static void init_rx_pool(struct ibmvnic_adapter *adapter,
165 struct ibmvnic_rx_pool *rx_pool, int num, int index,
166 int buff_size, int active)
167{
168 netdev_dbg(adapter->netdev,
169 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
170 index, num, buff_size);
171 rx_pool->size = num;
172 rx_pool->index = index;
173 rx_pool->buff_size = buff_size;
174 rx_pool->active = active;
175}
176
177static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
178 struct ibmvnic_long_term_buff *ltb, int size)
179{
180 struct device *dev = &adapter->vdev->dev;
181
182 ltb->size = size;
183 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
184 GFP_KERNEL);
185
186 if (!ltb->buff) {
187 dev_err(dev, "Couldn't alloc long term buffer\n");
188 return -ENOMEM;
189 }
190 ltb->map_id = adapter->map_id;
191 adapter->map_id++;
192
193 init_completion(&adapter->fw_done);
194 send_request_map(adapter, ltb->addr,
195 ltb->size, ltb->map_id);
196 wait_for_completion(&adapter->fw_done);
197 return 0;
198}
199
200static void free_long_term_buff(struct ibmvnic_adapter *adapter,
201 struct ibmvnic_long_term_buff *ltb)
202{
203 struct device *dev = &adapter->vdev->dev;
204
205 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
206 if (!adapter->failover)
207 send_request_unmap(adapter, ltb->map_id);
208}
209
210static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
211 struct ibmvnic_rx_pool *pool)
212{
213 struct device *dev = &adapter->vdev->dev;
214 int i;
215
216 pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
217 if (!pool->free_map)
218 return -ENOMEM;
219
220 pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
221 GFP_KERNEL);
222
223 if (!pool->rx_buff) {
224 dev_err(dev, "Couldn't alloc rx buffers\n");
225 kfree(pool->free_map);
226 return -ENOMEM;
227 }
228
229 if (alloc_long_term_buff(adapter, &pool->long_term_buff,
230 pool->size * pool->buff_size)) {
231 kfree(pool->free_map);
232 kfree(pool->rx_buff);
233 return -ENOMEM;
234 }
235
236 for (i = 0; i < pool->size; ++i)
237 pool->free_map[i] = i;
238
239 atomic_set(&pool->available, 0);
240 pool->next_alloc = 0;
241 pool->next_free = 0;
242
243 return 0;
244}
245
246static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
247 struct ibmvnic_rx_pool *pool)
248{
249 int count = pool->size - atomic_read(&pool->available);
250 struct device *dev = &adapter->vdev->dev;
251 int buffers_added = 0;
252 unsigned long lpar_rc;
253 union sub_crq sub_crq;
254 struct sk_buff *skb;
255 unsigned int offset;
256 dma_addr_t dma_addr;
257 unsigned char *dst;
258 u64 *handle_array;
259 int shift = 0;
260 int index;
261 int i;
262
263 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
264 be32_to_cpu(adapter->login_rsp_buf->
265 off_rxadd_subcrqs));
266
267 for (i = 0; i < count; ++i) {
268 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
269 if (!skb) {
270 dev_err(dev, "Couldn't replenish rx buff\n");
271 adapter->replenish_no_mem++;
272 break;
273 }
274
275 index = pool->free_map[pool->next_free];
276
277 if (pool->rx_buff[index].skb)
278 dev_err(dev, "Inconsistent free_map!\n");
279
280 /* Copy the skb to the long term mapped DMA buffer */
281 offset = index * pool->buff_size;
282 dst = pool->long_term_buff.buff + offset;
283 memset(dst, 0, pool->buff_size);
284 dma_addr = pool->long_term_buff.addr + offset;
285 pool->rx_buff[index].data = dst;
286
287 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
288 pool->rx_buff[index].dma = dma_addr;
289 pool->rx_buff[index].skb = skb;
290 pool->rx_buff[index].pool_index = pool->index;
291 pool->rx_buff[index].size = pool->buff_size;
292
293 memset(&sub_crq, 0, sizeof(sub_crq));
294 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
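/* The correlator carries the address of this rx_buff entry so that
 * ibmvnic_poll() can recover the buffer directly from the completion.
 */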
295 sub_crq.rx_add.correlator =
296 cpu_to_be64((u64)&pool->rx_buff[index]);
297 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
298 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
299
300 /* The length field of the sCRQ is defined to be 24 bits so the
301 * buffer size needs to be left shifted by a byte before it is
302 * converted to big endian to prevent the last byte from being
303 * truncated.
304 */
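/* Illustrative example: a 4096-byte (0x1000) buffer becomes 0x00100000 after
 * the shift, so the significant bytes occupy the upper three bytes of the
 * big-endian value and survive the 24-bit field instead of being cut off.
 */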
305#ifdef __LITTLE_ENDIAN__
306 shift = 8;
307#endif
308 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
309
310 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
311 &sub_crq);
312 if (lpar_rc != H_SUCCESS)
313 goto failure;
314
315 buffers_added++;
316 adapter->replenish_add_buff_success++;
317 pool->next_free = (pool->next_free + 1) % pool->size;
318 }
319 atomic_add(buffers_added, &pool->available);
320 return;
321
322failure:
323 dev_info(dev, "replenish pools failure\n");
324 pool->free_map[pool->next_free] = index;
325 pool->rx_buff[index].skb = NULL;
326 if (!dma_mapping_error(dev, dma_addr))
327 dma_unmap_single(dev, dma_addr, pool->buff_size,
328 DMA_FROM_DEVICE);
329
330 dev_kfree_skb_any(skb);
331 adapter->replenish_add_buff_failure++;
332 atomic_add(buffers_added, &pool->available);
333}
334
335static void replenish_pools(struct ibmvnic_adapter *adapter)
336{
337 int i;
338
339 if (adapter->migrated)
340 return;
341
342 adapter->replenish_task_cycles++;
343 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
344 i++) {
345 if (adapter->rx_pool[i].active)
346 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
347 }
348}
349
350static void free_rx_pool(struct ibmvnic_adapter *adapter,
351 struct ibmvnic_rx_pool *pool)
352{
353 int i;
354
355 kfree(pool->free_map);
356 pool->free_map = NULL;
357
358 if (!pool->rx_buff)
359 return;
360
361 for (i = 0; i < pool->size; i++) {
362 if (pool->rx_buff[i].skb) {
363 dev_kfree_skb_any(pool->rx_buff[i].skb);
364 pool->rx_buff[i].skb = NULL;
365 }
366 }
367 kfree(pool->rx_buff);
368 pool->rx_buff = NULL;
369}
370
371static int ibmvnic_open(struct net_device *netdev)
372{
373 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
374 struct device *dev = &adapter->vdev->dev;
375 struct ibmvnic_tx_pool *tx_pool;
376 union ibmvnic_crq crq;
377 int rxadd_subcrqs;
378 u64 *size_array;
379 int tx_subcrqs;
380 int i, j;
381
382 rxadd_subcrqs =
383 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
384 tx_subcrqs =
385 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
386 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
387 be32_to_cpu(adapter->login_rsp_buf->
388 off_rxadd_buff_size));
389 adapter->map_id = 1;
390 adapter->napi = kcalloc(adapter->req_rx_queues,
391 sizeof(struct napi_struct), GFP_KERNEL);
392 if (!adapter->napi)
393 goto alloc_napi_failed;
394 for (i = 0; i < adapter->req_rx_queues; i++) {
395 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
396 NAPI_POLL_WEIGHT);
397 napi_enable(&adapter->napi[i]);
398 }
399 adapter->rx_pool =
400 kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
401
402 if (!adapter->rx_pool)
403 goto rx_pool_arr_alloc_failed;
404 send_map_query(adapter);
405 for (i = 0; i < rxadd_subcrqs; i++) {
406 init_rx_pool(adapter, &adapter->rx_pool[i],
407 IBMVNIC_BUFFS_PER_POOL, i,
408 be64_to_cpu(size_array[i]), 1);
409 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
410 dev_err(dev, "Couldn't alloc rx pool\n");
411 goto rx_pool_alloc_failed;
412 }
413 }
414 adapter->tx_pool =
415 kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
416
417 if (!adapter->tx_pool)
418 goto tx_pool_arr_alloc_failed;
419 for (i = 0; i < tx_subcrqs; i++) {
420 tx_pool = &adapter->tx_pool[i];
421 tx_pool->tx_buff =
422 kcalloc(adapter->max_tx_entries_per_subcrq,
423 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
424 if (!tx_pool->tx_buff)
425 goto tx_pool_alloc_failed;
426
427 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
428 adapter->max_tx_entries_per_subcrq *
429 adapter->req_mtu))
430 goto tx_ltb_alloc_failed;
431
432 tx_pool->free_map =
433 kcalloc(adapter->max_tx_entries_per_subcrq,
434 sizeof(int), GFP_KERNEL);
435 if (!tx_pool->free_map)
436 goto tx_fm_alloc_failed;
437
438 for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
439 tx_pool->free_map[j] = j;
440
441 tx_pool->consumer_index = 0;
442 tx_pool->producer_index = 0;
443 }
444 adapter->bounce_buffer_size =
445 (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
446 adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
447 GFP_KERNEL);
448 if (!adapter->bounce_buffer)
449 goto bounce_alloc_failed;
450
451 adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
452 adapter->bounce_buffer_size,
453 DMA_TO_DEVICE);
454 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
455 dev_err(dev, "Couldn't map tx bounce buffer\n");
456 goto bounce_map_failed;
457 }
458 replenish_pools(adapter);
459
460 /* We're ready to receive frames, enable the sub-crq interrupts and
461 * set the logical link state to up
462 */
463 for (i = 0; i < adapter->req_rx_queues; i++)
464 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
465
466 for (i = 0; i < adapter->req_tx_queues; i++)
467 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
468
469 memset(&crq, 0, sizeof(crq));
470 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
471 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
472 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
473 ibmvnic_send_crq(adapter, &crq);
474
475 netif_tx_start_all_queues(netdev);
476
477 return 0;
478
479bounce_map_failed:
480 kfree(adapter->bounce_buffer);
481bounce_alloc_failed:
482 i = tx_subcrqs - 1;
483 kfree(adapter->tx_pool[i].free_map);
484tx_fm_alloc_failed:
485 free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
486tx_ltb_alloc_failed:
487 kfree(adapter->tx_pool[i].tx_buff);
488tx_pool_alloc_failed:
489 for (j = 0; j < i; j++) {
490 kfree(adapter->tx_pool[j].tx_buff);
491 free_long_term_buff(adapter,
492 &adapter->tx_pool[j].long_term_buff);
493 kfree(adapter->tx_pool[j].free_map);
494 }
495 kfree(adapter->tx_pool);
496 adapter->tx_pool = NULL;
497tx_pool_arr_alloc_failed:
498 i = rxadd_subcrqs;
499rx_pool_alloc_failed:
500 for (j = 0; j < i; j++) {
501 free_rx_pool(adapter, &adapter->rx_pool[j]);
502 free_long_term_buff(adapter,
503 &adapter->rx_pool[j].long_term_buff);
504 }
505 kfree(adapter->rx_pool);
506 adapter->rx_pool = NULL;
507rx_pool_arr_alloc_failed:
508 for (i = 0; i < adapter->req_rx_queues; i++)
509 napi_disable(&adapter->napi[i]);
510alloc_napi_failed:
511 return -ENOMEM;
512}
513
514static int ibmvnic_close(struct net_device *netdev)
515{
516 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
517 struct device *dev = &adapter->vdev->dev;
518 union ibmvnic_crq crq;
519 int i;
520
521 adapter->closing = true;
522
523 for (i = 0; i < adapter->req_rx_queues; i++)
524 napi_disable(&adapter->napi[i]);
525
526 if (!adapter->failover)
527 netif_tx_stop_all_queues(netdev);
528
529 if (adapter->bounce_buffer) {
530 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
531 dma_unmap_single(&adapter->vdev->dev,
532 adapter->bounce_buffer_dma,
533 adapter->bounce_buffer_size,
534 DMA_BIDIRECTIONAL);
535 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
536 }
537 kfree(adapter->bounce_buffer);
538 adapter->bounce_buffer = NULL;
539 }
540
541 memset(&crq, 0, sizeof(crq));
542 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
543 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
544 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
545 ibmvnic_send_crq(adapter, &crq);
546
547 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
548 i++) {
549 kfree(adapter->tx_pool[i].tx_buff);
550 free_long_term_buff(adapter,
551 &adapter->tx_pool[i].long_term_buff);
552 kfree(adapter->tx_pool[i].free_map);
553 }
554 kfree(adapter->tx_pool);
555 adapter->tx_pool = NULL;
556
557 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
558 i++) {
559 free_rx_pool(adapter, &adapter->rx_pool[i]);
560 free_long_term_buff(adapter,
561 &adapter->rx_pool[i].long_term_buff);
562 }
563 kfree(adapter->rx_pool);
564 adapter->rx_pool = NULL;
565
566 adapter->closing = false;
567
568 return 0;
569}
570
571/**
572 * build_hdr_data - creates L2/L3/L4 header data buffer
573 * @hdr_field - bitfield determining needed headers
574 * @skb - socket buffer
575 * @hdr_len - array of header lengths
576 * @hdr_data - buffer in which to return the built header data
577 *
578 * Reads hdr_field to determine which headers are needed by firmware.
579 * Builds a buffer containing these headers. Saves individual header
580 * lengths and total buffer length to be used to build descriptors.
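 * As an illustration of the bit layout used below: bit 6 of hdr_field requests
 * the L2 header, bit 5 the L3 header and bit 4 the L4 header, so a value with
 * bits 6-4 set copies all three headers into hdr_data.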
581 */
582static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
583 int *hdr_len, u8 *hdr_data)
584{
585 int len = 0;
586 u8 *hdr;
587
588 hdr_len[0] = sizeof(struct ethhdr);
589
590 if (skb->protocol == htons(ETH_P_IP)) {
591 hdr_len[1] = ip_hdr(skb)->ihl * 4;
592 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
593 hdr_len[2] = tcp_hdrlen(skb);
594 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
595 hdr_len[2] = sizeof(struct udphdr);
596 } else if (skb->protocol == htons(ETH_P_IPV6)) {
597 hdr_len[1] = sizeof(struct ipv6hdr);
598 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
599 hdr_len[2] = tcp_hdrlen(skb);
600 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
601 hdr_len[2] = sizeof(struct udphdr);
602 }
603
604 memset(hdr_data, 0, 120);
605 if ((hdr_field >> 6) & 1) {
606 hdr = skb_mac_header(skb);
607 memcpy(hdr_data, hdr, hdr_len[0]);
608 len += hdr_len[0];
609 }
610
611 if ((hdr_field >> 5) & 1) {
612 hdr = skb_network_header(skb);
613 memcpy(hdr_data + len, hdr, hdr_len[1]);
614 len += hdr_len[1];
615 }
616
617 if ((hdr_field >> 4) & 1) {
618 hdr = skb_transport_header(skb);
619 memcpy(hdr_data + len, hdr, hdr_len[2]);
620 len += hdr_len[2];
621 }
622 return len;
623}
624
625/**
626 * create_hdr_descs - create header and header extension descriptors
627 * @hdr_field - bitfield determining needed headers
628 * @data - buffer containing header data
629 * @len - length of data buffer
630 * @hdr_len - array of individual header lengths
631 * @scrq_arr - descriptor array
632 *
633 * Creates header and, if needed, header extension descriptors and
634 * places them in a descriptor array, scrq_arr
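 * Worked example (illustrative): a 14-byte Ethernet + 20-byte IPv4 + 20-byte
 * TCP header block totals 54 bytes; the first 24 bytes fill the header
 * descriptor and the remaining 30 bytes are split across two header extension
 * descriptors (29 bytes, then 1 byte).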
635 */
636
637static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
638 union sub_crq *scrq_arr)
639{
640 union sub_crq hdr_desc;
641 int tmp_len = len;
642 u8 *data, *cur;
643 int tmp;
644
645 while (tmp_len > 0) {
646 cur = hdr_data + len - tmp_len;
647
648 memset(&hdr_desc, 0, sizeof(hdr_desc));
649 if (cur != hdr_data) {
650 data = hdr_desc.hdr_ext.data;
651 tmp = tmp_len > 29 ? 29 : tmp_len;
652 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
653 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
654 hdr_desc.hdr_ext.len = tmp;
655 } else {
656 data = hdr_desc.hdr.data;
657 tmp = tmp_len > 24 ? 24 : tmp_len;
658 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
659 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
660 hdr_desc.hdr.len = tmp;
661 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
662 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
663 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
664 hdr_desc.hdr.flag = hdr_field << 1;
665 }
666 memcpy(data, cur, tmp);
667 tmp_len -= tmp;
668 *scrq_arr = hdr_desc;
669 scrq_arr++;
670 }
671}
672
673/**
674 * build_hdr_descs_arr - build a header descriptor array
675 * @txbuff - tx buffer containing the skb and descriptor arrays
676 * @num_entries - pointer to the number of descriptors to be sent;
677 * updated with the number of header descriptors added
678 * @hdr_field - bit field determining which headers will be sent
679 *
680 * This function will build a TX descriptor array with applicable
681 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
682 */
683
684static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
685 int *num_entries, u8 hdr_field)
686{
687 int hdr_len[3] = {0, 0, 0};
688 int tot_len, len;
689 u8 *hdr_data = txbuff->hdr_data;
690
691 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
692 txbuff->hdr_data);
693 len = tot_len;
694 len -= 24;
695 if (len > 0)
696 *num_entries += len % 29 ? len / 29 + 1 : len / 29;
697 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
698 txbuff->indir_arr + 1);
699}
700
701static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
702{
703 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
704 int queue_num = skb_get_queue_mapping(skb);
705 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
706 struct device *dev = &adapter->vdev->dev;
707 struct ibmvnic_tx_buff *tx_buff = NULL;
708 struct ibmvnic_tx_pool *tx_pool;
709 unsigned int tx_send_failed = 0;
710 unsigned int tx_map_failed = 0;
711 unsigned int tx_dropped = 0;
712 unsigned int tx_packets = 0;
713 unsigned int tx_bytes = 0;
714 dma_addr_t data_dma_addr;
715 struct netdev_queue *txq;
716 bool used_bounce = false;
717 unsigned long lpar_rc;
718 union sub_crq tx_crq;
719 unsigned int offset;
720 int num_entries = 1;
721 unsigned char *dst;
722 u64 *handle_array;
723 int index = 0;
724 int ret = 0;
725
726 tx_pool = &adapter->tx_pool[queue_num];
727 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
728 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
729 be32_to_cpu(adapter->login_rsp_buf->
730 off_txsubm_subcrqs));
731 if (adapter->migrated) {
732 tx_send_failed++;
733 tx_dropped++;
734 ret = NETDEV_TX_BUSY;
735 goto out;
736 }
737
738 index = tx_pool->free_map[tx_pool->consumer_index];
739 offset = index * adapter->req_mtu;
740 dst = tx_pool->long_term_buff.buff + offset;
741 memset(dst, 0, adapter->req_mtu);
742 skb_copy_from_linear_data(skb, dst, skb->len);
743 data_dma_addr = tx_pool->long_term_buff.addr + offset;
744
745 tx_pool->consumer_index =
746 (tx_pool->consumer_index + 1) %
747 adapter->max_tx_entries_per_subcrq;
748
749 tx_buff = &tx_pool->tx_buff[index];
750 tx_buff->skb = skb;
751 tx_buff->data_dma[0] = data_dma_addr;
752 tx_buff->data_len[0] = skb->len;
753 tx_buff->index = index;
754 tx_buff->pool_index = queue_num;
755 tx_buff->last_frag = true;
756 tx_buff->used_bounce = used_bounce;
757
758 memset(&tx_crq, 0, sizeof(tx_crq));
759 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
760 tx_crq.v1.type = IBMVNIC_TX_DESC;
761 tx_crq.v1.n_crq_elem = 1;
762 tx_crq.v1.n_sge = 1;
763 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
764 tx_crq.v1.correlator = cpu_to_be32(index);
765 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
766 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
767 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
768
769 if (adapter->vlan_header_insertion) {
770 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
771 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
772 }
773
774 if (skb->protocol == htons(ETH_P_IP)) {
775 if (ip_hdr(skb)->version == 4)
776 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
777 else if (ip_hdr(skb)->version == 6)
778 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
779
780 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
781 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
782 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
783 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
784 }
785
786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
787 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
788 hdrs += 2;
789 }
790 /* determine if l2/3/4 headers are sent to firmware */
791 if ((*hdrs >> 7) & 1 &&
792 (skb->protocol == htons(ETH_P_IP) ||
793 skb->protocol == htons(ETH_P_IPV6))) {
794 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
795 tx_crq.v1.n_crq_elem = num_entries;
796 tx_buff->indir_arr[0] = tx_crq;
797 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
798 sizeof(tx_buff->indir_arr),
799 DMA_TO_DEVICE);
800 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
801 if (!firmware_has_feature(FW_FEATURE_CMO))
802 dev_err(dev, "tx: unable to map descriptor array\n");
803 tx_map_failed++;
804 tx_dropped++;
805 ret = NETDEV_TX_BUSY;
806 goto out;
807 }
808 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
809 (u64)tx_buff->indir_dma,
810 (u64)num_entries);
811 } else {
812 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
813 &tx_crq);
814 }
815 if (lpar_rc != H_SUCCESS) {
816 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
817
818 if (tx_pool->consumer_index == 0)
819 tx_pool->consumer_index =
820 adapter->max_tx_entries_per_subcrq - 1;
821 else
822 tx_pool->consumer_index--;
823
824 tx_send_failed++;
825 tx_dropped++;
826 ret = NETDEV_TX_BUSY;
827 goto out;
828 }
829 tx_packets++;
830 tx_bytes += skb->len;
831 txq->trans_start = jiffies;
832 ret = NETDEV_TX_OK;
833
834out:
835 netdev->stats.tx_dropped += tx_dropped;
836 netdev->stats.tx_bytes += tx_bytes;
837 netdev->stats.tx_packets += tx_packets;
838 adapter->tx_send_failed += tx_send_failed;
839 adapter->tx_map_failed += tx_map_failed;
840
841 return ret;
842}
843
844static void ibmvnic_set_multi(struct net_device *netdev)
845{
846 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
847 struct netdev_hw_addr *ha;
848 union ibmvnic_crq crq;
849
850 memset(&crq, 0, sizeof(crq));
851 crq.request_capability.first = IBMVNIC_CRQ_CMD;
852 crq.request_capability.cmd = REQUEST_CAPABILITY;
853
854 if (netdev->flags & IFF_PROMISC) {
855 if (!adapter->promisc_supported)
856 return;
857 } else {
858 if (netdev->flags & IFF_ALLMULTI) {
859 /* Accept all multicast */
860 memset(&crq, 0, sizeof(crq));
861 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
862 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
863 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
864 ibmvnic_send_crq(adapter, &crq);
865 } else if (netdev_mc_empty(netdev)) {
866 /* Reject all multicast */
867 memset(&crq, 0, sizeof(crq));
868 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
869 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
870 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
871 ibmvnic_send_crq(adapter, &crq);
872 } else {
873 /* Accept one or more multicast(s) */
874 netdev_for_each_mc_addr(ha, netdev) {
875 memset(&crq, 0, sizeof(crq));
876 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
877 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
878 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
879 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
880 ha->addr);
881 ibmvnic_send_crq(adapter, &crq);
882 }
883 }
884 }
885}
886
887static int ibmvnic_set_mac(struct net_device *netdev, void *p)
888{
889 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
890 struct sockaddr *addr = p;
891 union ibmvnic_crq crq;
892
893 if (!is_valid_ether_addr(addr->sa_data))
894 return -EADDRNOTAVAIL;
895
896 memset(&crq, 0, sizeof(crq));
897 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
898 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
899 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
900 ibmvnic_send_crq(adapter, &crq);
901 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
902 return 0;
903}
904
905static void ibmvnic_tx_timeout(struct net_device *dev)
906{
907 struct ibmvnic_adapter *adapter = netdev_priv(dev);
908 int rc;
909
910 /* Adapter timed out, resetting it */
911 release_sub_crqs(adapter);
912 rc = ibmvnic_reset_crq(adapter);
913 if (rc)
914 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
915 else
916 ibmvnic_send_crq_init(adapter);
917}
918
919static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
920 struct ibmvnic_rx_buff *rx_buff)
921{
922 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
923
924 rx_buff->skb = NULL;
925
926 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
927 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
928
929 atomic_dec(&pool->available);
930}
931
932static int ibmvnic_poll(struct napi_struct *napi, int budget)
933{
934 struct net_device *netdev = napi->dev;
935 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
936 int scrq_num = (int)(napi - adapter->napi);
937 int frames_processed = 0;
938restart_poll:
939 while (frames_processed < budget) {
940 struct sk_buff *skb;
941 struct ibmvnic_rx_buff *rx_buff;
942 union sub_crq *next;
943 u32 length;
944 u16 offset;
945 u8 flags = 0;
946
947 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
948 break;
949 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
950 rx_buff =
951 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
952 rx_comp.correlator);
953 /* do error checking */
954 if (next->rx_comp.rc) {
955 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
956 /* free the entry */
957 next->rx_comp.first = 0;
958 remove_buff_from_pool(adapter, rx_buff);
959 break;
960 }
961
962 length = be32_to_cpu(next->rx_comp.len);
963 offset = be16_to_cpu(next->rx_comp.off_frame_data);
964 flags = next->rx_comp.flags;
965 skb = rx_buff->skb;
966 skb_copy_to_linear_data(skb, rx_buff->data + offset,
967 length);
968 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
969 /* free the entry */
970 next->rx_comp.first = 0;
971 remove_buff_from_pool(adapter, rx_buff);
972
973 skb_put(skb, length);
974 skb->protocol = eth_type_trans(skb, netdev);
975
976 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
977 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
978 skb->ip_summed = CHECKSUM_UNNECESSARY;
979 }
980
981 length = skb->len;
982 napi_gro_receive(napi, skb); /* send it up */
983 netdev->stats.rx_packets++;
984 netdev->stats.rx_bytes += length;
985 frames_processed++;
986 }
987 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
988
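/* Budget not exhausted: re-enable the queue interrupt and complete NAPI,
 * then recheck for completions that raced in after the re-enable and
 * reschedule if any are pending so they are not lost.
 */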
989 if (frames_processed < budget) {
990 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
991 napi_complete(napi);
992 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
993 napi_reschedule(napi)) {
994 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
995 goto restart_poll;
996 }
997 }
998 return frames_processed;
999}
1000
1001#ifdef CONFIG_NET_POLL_CONTROLLER
1002static void ibmvnic_netpoll_controller(struct net_device *dev)
1003{
1004 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1005 int i;
1006
1007 replenish_pools(netdev_priv(dev));
1008 for (i = 0; i < adapter->req_rx_queues; i++)
1009 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1010 adapter->rx_scrq[i]);
1011}
1012#endif
1013
1014static const struct net_device_ops ibmvnic_netdev_ops = {
1015 .ndo_open = ibmvnic_open,
1016 .ndo_stop = ibmvnic_close,
1017 .ndo_start_xmit = ibmvnic_xmit,
1018 .ndo_set_rx_mode = ibmvnic_set_multi,
1019 .ndo_set_mac_address = ibmvnic_set_mac,
1020 .ndo_validate_addr = eth_validate_addr,
1021 .ndo_tx_timeout = ibmvnic_tx_timeout,
1022#ifdef CONFIG_NET_POLL_CONTROLLER
1023 .ndo_poll_controller = ibmvnic_netpoll_controller,
1024#endif
1025};
1026
1027/* ethtool functions */
1028
1029static int ibmvnic_get_settings(struct net_device *netdev,
1030 struct ethtool_cmd *cmd)
1031{
1032 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1033 SUPPORTED_FIBRE);
1034 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1035 ADVERTISED_FIBRE);
1036 ethtool_cmd_speed_set(cmd, SPEED_1000);
1037 cmd->duplex = DUPLEX_FULL;
1038 cmd->port = PORT_FIBRE;
1039 cmd->phy_address = 0;
1040 cmd->transceiver = XCVR_INTERNAL;
1041 cmd->autoneg = AUTONEG_ENABLE;
1042 cmd->maxtxpkt = 0;
1043 cmd->maxrxpkt = 1;
1044 return 0;
1045}
1046
1047static void ibmvnic_get_drvinfo(struct net_device *dev,
1048 struct ethtool_drvinfo *info)
1049{
1050 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1051 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1052}
1053
1054static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1055{
1056 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1057
1058 return adapter->msg_enable;
1059}
1060
1061static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1062{
1063 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1064
1065 adapter->msg_enable = data;
1066}
1067
1068static u32 ibmvnic_get_link(struct net_device *netdev)
1069{
1070 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1071
1072 /* Don't need to send a query because we request a logical link up at
1073 * init and then we wait for link state indications
1074 */
1075 return adapter->logical_link_state;
1076}
1077
1078static void ibmvnic_get_ringparam(struct net_device *netdev,
1079 struct ethtool_ringparam *ring)
1080{
1081 ring->rx_max_pending = 0;
1082 ring->tx_max_pending = 0;
1083 ring->rx_mini_max_pending = 0;
1084 ring->rx_jumbo_max_pending = 0;
1085 ring->rx_pending = 0;
1086 ring->tx_pending = 0;
1087 ring->rx_mini_pending = 0;
1088 ring->rx_jumbo_pending = 0;
1089}
1090
1091static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1092{
1093 int i;
1094
1095 if (stringset != ETH_SS_STATS)
1096 return;
1097
1098 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1099 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1100}
1101
1102static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1103{
1104 switch (sset) {
1105 case ETH_SS_STATS:
1106 return ARRAY_SIZE(ibmvnic_stats);
1107 default:
1108 return -EOPNOTSUPP;
1109 }
1110}
1111
1112static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1113 struct ethtool_stats *stats, u64 *data)
1114{
1115 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1116 union ibmvnic_crq crq;
1117 int i;
1118
1119 memset(&crq, 0, sizeof(crq));
1120 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1121 crq.request_statistics.cmd = REQUEST_STATISTICS;
1122 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1123 crq.request_statistics.len =
1124 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1125
1126 /* Wait for data to be written */
1127 init_completion(&adapter->stats_done);
1128 ibmvnic_send_crq(adapter, &crq);
1129 wait_for_completion(&adapter->stats_done);
1130
1131 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1132 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1133}
1134
1135static const struct ethtool_ops ibmvnic_ethtool_ops = {
1136 .get_settings = ibmvnic_get_settings,
1137 .get_drvinfo = ibmvnic_get_drvinfo,
1138 .get_msglevel = ibmvnic_get_msglevel,
1139 .set_msglevel = ibmvnic_set_msglevel,
1140 .get_link = ibmvnic_get_link,
1141 .get_ringparam = ibmvnic_get_ringparam,
1142 .get_strings = ibmvnic_get_strings,
1143 .get_sset_count = ibmvnic_get_sset_count,
1144 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
1145};
1146
1147/* Routines for managing CRQs/sCRQs */
1148
1149static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1150 struct ibmvnic_sub_crq_queue *scrq)
1151{
1152 struct device *dev = &adapter->vdev->dev;
1153 long rc;
1154
1155 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1156
1157 /* Close the sub-crqs */
1158 do {
1159 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1160 adapter->vdev->unit_address,
1161 scrq->crq_num);
1162 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1163
1164 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1165 DMA_BIDIRECTIONAL);
1166 free_pages((unsigned long)scrq->msgs, 2);
1167 kfree(scrq);
1168}
1169
1170static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1171 *adapter)
1172{
1173 struct device *dev = &adapter->vdev->dev;
1174 struct ibmvnic_sub_crq_queue *scrq;
1175 int rc;
1176
1177 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1178 if (!scrq)
1179 return NULL;
1180
1181 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
1182 if (!scrq->msgs) {
1183 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1184 goto zero_page_failed;
1185 }
1186 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1187
1188 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1189 DMA_BIDIRECTIONAL);
1190 if (dma_mapping_error(dev, scrq->msg_token)) {
1191 dev_warn(dev, "Couldn't map crq queue messages page\n");
1192 goto map_failed;
1193 }
1194
1195 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1196 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1197
1198 if (rc == H_RESOURCE)
1199 rc = ibmvnic_reset_crq(adapter);
1200
1201 if (rc == H_CLOSED) {
1202 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1203 } else if (rc) {
1204 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1205 goto reg_failed;
1206 }
1207
1208 scrq->adapter = adapter;
1209 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1210 scrq->cur = 0;
1211 scrq->rx_skb_top = NULL;
1212 spin_lock_init(&scrq->lock);
1213
1214 netdev_dbg(adapter->netdev,
1215 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1216 scrq->crq_num, scrq->hw_irq, scrq->irq);
1217
1218 return scrq;
1219
1220reg_failed:
1221 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1222 DMA_BIDIRECTIONAL);
1223map_failed:
1224 free_pages((unsigned long)scrq->msgs, 2);
1225zero_page_failed:
1226 kfree(scrq);
1227
1228 return NULL;
1229}
1230
1231static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1232{
1233 int i;
1234
1235 if (adapter->tx_scrq) {
1236 for (i = 0; i < adapter->req_tx_queues; i++)
1237 if (adapter->tx_scrq[i]) {
1238 free_irq(adapter->tx_scrq[i]->irq,
1239 adapter->tx_scrq[i]);
1240 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1241 release_sub_crq_queue(adapter,
1242 adapter->tx_scrq[i]);
1243 }
1244 adapter->tx_scrq = NULL;
1245 }
1246
1247 if (adapter->rx_scrq) {
1248 for (i = 0; i < adapter->req_rx_queues; i++)
1249 if (adapter->rx_scrq[i]) {
1250 free_irq(adapter->rx_scrq[i]->irq,
1251 adapter->rx_scrq[i]);
1252 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1253 release_sub_crq_queue(adapter,
1254 adapter->rx_scrq[i]);
1255 }
1256 adapter->rx_scrq = NULL;
1257 }
1258
1259 adapter->requested_caps = 0;
1260}
1261
1262static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
1263{
1264 int i;
1265
1266 if (adapter->tx_scrq) {
1267 for (i = 0; i < adapter->req_tx_queues; i++)
1268 if (adapter->tx_scrq[i])
1269 release_sub_crq_queue(adapter,
1270 adapter->tx_scrq[i]);
1271 adapter->tx_scrq = NULL;
1272 }
1273
1274 if (adapter->rx_scrq) {
1275 for (i = 0; i < adapter->req_rx_queues; i++)
1276 if (adapter->rx_scrq[i])
1277 release_sub_crq_queue(adapter,
1278 adapter->rx_scrq[i]);
1279 adapter->rx_scrq = NULL;
1280 }
1281
1282 adapter->requested_caps = 0;
1283}
1284
1285static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1286 struct ibmvnic_sub_crq_queue *scrq)
1287{
1288 struct device *dev = &adapter->vdev->dev;
1289 unsigned long rc;
1290
1291 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1292 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1293 if (rc)
1294 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1295 scrq->hw_irq, rc);
1296 return rc;
1297}
1298
1299static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1300 struct ibmvnic_sub_crq_queue *scrq)
1301{
1302 struct device *dev = &adapter->vdev->dev;
1303 unsigned long rc;
1304
1305 if (scrq->hw_irq > 0x100000000ULL) {
1306 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1307 return 1;
1308 }
1309
1310 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1311 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1312 if (rc)
1313 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1314 scrq->hw_irq, rc);
1315 return rc;
1316}
1317
1318static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1319 struct ibmvnic_sub_crq_queue *scrq)
1320{
1321 struct device *dev = &adapter->vdev->dev;
1322 struct ibmvnic_tx_buff *txbuff;
1323 union sub_crq *next;
1324 int index;
1325 int i, j;
1326 u8 first;
1327
1328restart_loop:
1329 while (pending_scrq(adapter, scrq)) {
1330 unsigned int pool = scrq->pool_index;
1331
1332 next = ibmvnic_next_scrq(adapter, scrq);
1333 for (i = 0; i < next->tx_comp.num_comps; i++) {
1334 if (next->tx_comp.rcs[i]) {
1335 dev_err(dev, "tx error %x\n",
1336 next->tx_comp.rcs[i]);
1337 continue;
1338 }
1339 index = be32_to_cpu(next->tx_comp.correlators[i]);
1340 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1341
1342 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1343 if (!txbuff->data_dma[j])
1344 continue;
1345
1346 txbuff->data_dma[j] = 0;
1347 txbuff->used_bounce = false;
1348 }
1349 /* if sub_crq was sent indirectly */
1350 first = txbuff->indir_arr[0].generic.first;
1351 if (first == IBMVNIC_CRQ_CMD) {
1352 dma_unmap_single(dev, txbuff->indir_dma,
1353 sizeof(txbuff->indir_arr),
1354 DMA_TO_DEVICE);
1355 }
1356
1357 if (txbuff->last_frag)
1358 dev_kfree_skb_any(txbuff->skb);
1359
1360 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1361 producer_index] = index;
1362 adapter->tx_pool[pool].producer_index =
1363 (adapter->tx_pool[pool].producer_index + 1) %
1364 adapter->max_tx_entries_per_subcrq;
1365 }
1366 /* remove tx_comp scrq */
1367 next->tx_comp.first = 0;
1368 }
1369
1370 enable_scrq_irq(adapter, scrq);
1371
1372 if (pending_scrq(adapter, scrq)) {
1373 disable_scrq_irq(adapter, scrq);
1374 goto restart_loop;
1375 }
1376
1377 return 0;
1378}
1379
1380static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1381{
1382 struct ibmvnic_sub_crq_queue *scrq = instance;
1383 struct ibmvnic_adapter *adapter = scrq->adapter;
1384
1385 disable_scrq_irq(adapter, scrq);
1386 ibmvnic_complete_tx(adapter, scrq);
1387
1388 return IRQ_HANDLED;
1389}
1390
1391static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1392{
1393 struct ibmvnic_sub_crq_queue *scrq = instance;
1394 struct ibmvnic_adapter *adapter = scrq->adapter;
1395
1396 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1397 disable_scrq_irq(adapter, scrq);
1398 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1399 }
1400
1401 return IRQ_HANDLED;
1402}
1403
1404static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1405{
1406 struct device *dev = &adapter->vdev->dev;
1407 struct ibmvnic_sub_crq_queue *scrq;
1408 int i = 0, j = 0;
1409 int rc = 0;
1410
1411 for (i = 0; i < adapter->req_tx_queues; i++) {
1412 scrq = adapter->tx_scrq[i];
1413 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1414
1415 if (!scrq->irq) {
1416 rc = -EINVAL;
1417 dev_err(dev, "Error mapping irq\n");
1418 goto req_tx_irq_failed;
1419 }
1420
1421 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1422 0, "ibmvnic_tx", scrq);
1423
1424 if (rc) {
1425 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1426 scrq->irq, rc);
1427 irq_dispose_mapping(scrq->irq);
1428 goto req_rx_irq_failed;
1429 }
1430 }
1431
1432 for (i = 0; i < adapter->req_rx_queues; i++) {
1433 scrq = adapter->rx_scrq[i];
1434 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1435 if (!scrq->irq) {
1436 rc = -EINVAL;
1437 dev_err(dev, "Error mapping irq\n");
1438 goto req_rx_irq_failed;
1439 }
1440 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1441 0, "ibmvnic_rx", scrq);
1442 if (rc) {
1443 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1444 scrq->irq, rc);
1445 irq_dispose_mapping(scrq->irq);
1446 goto req_rx_irq_failed;
1447 }
1448 }
1449 return rc;
1450
1451req_rx_irq_failed:
1452 for (j = 0; j < i; j++) {
1453 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1454 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1455 }
1456 i = adapter->req_tx_queues;
1457req_tx_irq_failed:
1458 for (j = 0; j < i; j++) {
1459 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1460 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
1461 }
1462 release_sub_crqs_no_irqs(adapter);
1463 return rc;
1464}
1465
1466static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1467{
1468 struct device *dev = &adapter->vdev->dev;
1469 struct ibmvnic_sub_crq_queue **allqueues;
1470 int registered_queues = 0;
1471 union ibmvnic_crq crq;
1472 int total_queues;
1473 int more = 0;
1474 int i;
1475
1476 if (!retry) {
1477 /* Sub-CRQ entries are 32 byte long */
1478 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
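/* e.g. 4 * 4096 / 32 = 512 entries with 4 KB pages, 8192 with 64 KB pages */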
1479
1480 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1481 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1482 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1483 goto allqueues_failed;
1484 }
1485
1486 /* Get the minimum between the queried max and the entries
1487 * that fit in our PAGE_SIZE
1488 */
1489 adapter->req_tx_entries_per_subcrq =
1490 adapter->max_tx_entries_per_subcrq > entries_page ?
1491 entries_page : adapter->max_tx_entries_per_subcrq;
1492 adapter->req_rx_add_entries_per_subcrq =
1493 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1494 entries_page : adapter->max_rx_add_entries_per_subcrq;
1495
1496 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1497 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1498 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1499
1500 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1501 }
1502
1503 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1504
1505 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1506 if (!allqueues)
1507 goto allqueues_failed;
1508
1509 for (i = 0; i < total_queues; i++) {
1510 allqueues[i] = init_sub_crq_queue(adapter);
1511 if (!allqueues[i]) {
1512 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1513 break;
1514 }
1515 registered_queues++;
1516 }
1517
1518 /* Make sure we were able to register the minimum number of queues */
1519 if (registered_queues <
1520 adapter->min_tx_queues + adapter->min_rx_queues) {
1521 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1522 goto tx_failed;
1523 }
1524
1525 /* Reduce the requested queue counts to absorb the allocation shortfall */
1526 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1527 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1528 switch (i % 3) {
1529 case 0:
1530 if (adapter->req_rx_queues > adapter->min_rx_queues)
1531 adapter->req_rx_queues--;
1532 else
1533 more++;
1534 break;
1535 case 1:
1536 if (adapter->req_tx_queues > adapter->min_tx_queues)
1537 adapter->req_tx_queues--;
1538 else
1539 more++;
1540 break;
1541 }
1542 }
1543
1544 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1545 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1546 if (!adapter->tx_scrq)
1547 goto tx_failed;
1548
1549 for (i = 0; i < adapter->req_tx_queues; i++) {
1550 adapter->tx_scrq[i] = allqueues[i];
1551 adapter->tx_scrq[i]->pool_index = i;
1552 }
1553
1554 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1555 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1556 if (!adapter->rx_scrq)
1557 goto rx_failed;
1558
1559 for (i = 0; i < adapter->req_rx_queues; i++) {
1560 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1561 adapter->rx_scrq[i]->scrq_num = i;
1562 }
1563
1564 memset(&crq, 0, sizeof(crq));
1565 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1566 crq.request_capability.cmd = REQUEST_CAPABILITY;
1567
1568 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1569 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1570 ibmvnic_send_crq(adapter, &crq);
1571
1572 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1573 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1574 ibmvnic_send_crq(adapter, &crq);
1575
1576 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1577 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1578 ibmvnic_send_crq(adapter, &crq);
1579
1580 crq.request_capability.capability =
1581 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1582 crq.request_capability.number =
1583 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1584 ibmvnic_send_crq(adapter, &crq);
1585
1586 crq.request_capability.capability =
1587 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1588 crq.request_capability.number =
1589 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1590 ibmvnic_send_crq(adapter, &crq);
1591
1592 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1593 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1594 ibmvnic_send_crq(adapter, &crq);
1595
1596 if (adapter->netdev->flags & IFF_PROMISC) {
1597 if (adapter->promisc_supported) {
1598 crq.request_capability.capability =
1599 cpu_to_be16(PROMISC_REQUESTED);
1600 crq.request_capability.number = cpu_to_be64(1);
1601 ibmvnic_send_crq(adapter, &crq);
1602 }
1603 } else {
1604 crq.request_capability.capability =
1605 cpu_to_be16(PROMISC_REQUESTED);
1606 crq.request_capability.number = cpu_to_be64(0);
1607 ibmvnic_send_crq(adapter, &crq);
1608 }
1609
1610 kfree(allqueues);
1611
1612 return;
1613
1614rx_failed:
1615 kfree(adapter->tx_scrq);
1616 adapter->tx_scrq = NULL;
1617tx_failed:
1618 for (i = 0; i < registered_queues; i++)
1619 release_sub_crq_queue(adapter, allqueues[i]);
1620 kfree(allqueues);
1621allqueues_failed:
1622 ibmvnic_remove(adapter->vdev);
1623}
1624
1625static int pending_scrq(struct ibmvnic_adapter *adapter,
1626 struct ibmvnic_sub_crq_queue *scrq)
1627{
1628 union sub_crq *entry = &scrq->msgs[scrq->cur];
1629
1630 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1631 return 1;
1632 else
1633 return 0;
1634}
1635
1636static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1637 struct ibmvnic_sub_crq_queue *scrq)
1638{
1639 union sub_crq *entry;
1640 unsigned long flags;
1641
1642 spin_lock_irqsave(&scrq->lock, flags);
1643 entry = &scrq->msgs[scrq->cur];
1644 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1645 if (++scrq->cur == scrq->size)
1646 scrq->cur = 0;
1647 } else {
1648 entry = NULL;
1649 }
1650 spin_unlock_irqrestore(&scrq->lock, flags);
1651
1652 return entry;
1653}
1654
1655static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1656{
1657 struct ibmvnic_crq_queue *queue = &adapter->crq;
1658 union ibmvnic_crq *crq;
1659
1660 crq = &queue->msgs[queue->cur];
1661 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1662 if (++queue->cur == queue->size)
1663 queue->cur = 0;
1664 } else {
1665 crq = NULL;
1666 }
1667
1668 return crq;
1669}
1670
1671static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1672 union sub_crq *sub_crq)
1673{
1674 unsigned int ua = adapter->vdev->unit_address;
1675 struct device *dev = &adapter->vdev->dev;
1676 u64 *u64_crq = (u64 *)sub_crq;
1677 int rc;
1678
1679 netdev_dbg(adapter->netdev,
1680 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1681 (unsigned long int)cpu_to_be64(remote_handle),
1682 (unsigned long int)cpu_to_be64(u64_crq[0]),
1683 (unsigned long int)cpu_to_be64(u64_crq[1]),
1684 (unsigned long int)cpu_to_be64(u64_crq[2]),
1685 (unsigned long int)cpu_to_be64(u64_crq[3]));
1686
1687 /* Make sure the hypervisor sees the complete request */
1688 mb();
1689
1690 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1691 cpu_to_be64(remote_handle),
1692 cpu_to_be64(u64_crq[0]),
1693 cpu_to_be64(u64_crq[1]),
1694 cpu_to_be64(u64_crq[2]),
1695 cpu_to_be64(u64_crq[3]));
1696
1697 if (rc) {
1698 if (rc == H_CLOSED)
1699 dev_warn(dev, "CRQ Queue closed\n");
1700 dev_err(dev, "Send error (rc=%d)\n", rc);
1701 }
1702
1703 return rc;
1704}
1705
1706static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1707 u64 remote_handle, u64 ioba, u64 num_entries)
1708{
1709 unsigned int ua = adapter->vdev->unit_address;
1710 struct device *dev = &adapter->vdev->dev;
1711 int rc;
1712
1713 /* Make sure the hypervisor sees the complete request */
1714 mb();
1715 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1716 cpu_to_be64(remote_handle),
1717 ioba, num_entries);
1718
1719 if (rc) {
1720 if (rc == H_CLOSED)
1721 dev_warn(dev, "CRQ Queue closed\n");
1722 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1723 }
1724
1725 return rc;
1726}
1727
1728static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1729 union ibmvnic_crq *crq)
1730{
1731 unsigned int ua = adapter->vdev->unit_address;
1732 struct device *dev = &adapter->vdev->dev;
1733 u64 *u64_crq = (u64 *)crq;
1734 int rc;
1735
1736 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1737 (unsigned long int)cpu_to_be64(u64_crq[0]),
1738 (unsigned long int)cpu_to_be64(u64_crq[1]));
1739
1740 /* Make sure the hypervisor sees the complete request */
1741 mb();
1742
1743 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1744 cpu_to_be64(u64_crq[0]),
1745 cpu_to_be64(u64_crq[1]));
1746
1747 if (rc) {
1748 if (rc == H_CLOSED)
1749 dev_warn(dev, "CRQ Queue closed\n");
1750 dev_warn(dev, "Send error (rc=%d)\n", rc);
1751 }
1752
1753 return rc;
1754}
1755
1756static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1757{
1758 union ibmvnic_crq crq;
1759
1760 memset(&crq, 0, sizeof(crq));
1761 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1762 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1763 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1764
1765 return ibmvnic_send_crq(adapter, &crq);
1766}
1767
1768static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1769{
1770 union ibmvnic_crq crq;
1771
1772 memset(&crq, 0, sizeof(crq));
1773 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1774 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1775 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1776
1777 return ibmvnic_send_crq(adapter, &crq);
1778}
1779
1780static int send_version_xchg(struct ibmvnic_adapter *adapter)
1781{
1782 union ibmvnic_crq crq;
1783
1784 memset(&crq, 0, sizeof(crq));
1785 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1786 crq.version_exchange.cmd = VERSION_EXCHANGE;
1787 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1788
1789 return ibmvnic_send_crq(adapter, &crq);
1790}
1791
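/* Build and issue the LOGIN request. The DMA-mapped login buffer lists the
 * sub-CRQ handles of every requested tx and rx queue and points at a
 * response buffer for the server to fill in; the command is also queued on
 * the inflight list so the buffers can be reclaimed on a CRQ teardown.
 */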
1792static void send_login(struct ibmvnic_adapter *adapter)
1793{
1794 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1795 struct ibmvnic_login_buffer *login_buffer;
1796 struct ibmvnic_inflight_cmd *inflight_cmd;
1797 struct device *dev = &adapter->vdev->dev;
1798 dma_addr_t rsp_buffer_token;
1799 dma_addr_t buffer_token;
1800 size_t rsp_buffer_size;
1801 union ibmvnic_crq crq;
1802 unsigned long flags;
1803 size_t buffer_size;
1804 __be64 *tx_list_p;
1805 __be64 *rx_list_p;
1806 int i;
1807
1808 buffer_size =
1809 sizeof(struct ibmvnic_login_buffer) +
1810 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1811
1812 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1813 if (!login_buffer)
1814 goto buf_alloc_failed;
1815
1816 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1817 DMA_TO_DEVICE);
1818 if (dma_mapping_error(dev, buffer_token)) {
1819 dev_err(dev, "Couldn't map login buffer\n");
1820 goto buf_map_failed;
1821 }
1822
498cd8e4
JA
1823 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1824 sizeof(u64) * adapter->req_tx_queues +
1825 sizeof(u64) * adapter->req_rx_queues +
1826 sizeof(u64) * adapter->req_rx_queues +
1827 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
032c5e82
TF
1828
1829 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1830 if (!login_rsp_buffer)
1831 goto buf_rsp_alloc_failed;
1832
1833 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1834 rsp_buffer_size, DMA_FROM_DEVICE);
1835 if (dma_mapping_error(dev, rsp_buffer_token)) {
1836 dev_err(dev, "Couldn't map login rsp buffer\n");
1837 goto buf_rsp_map_failed;
1838 }
1839 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1840 if (!inflight_cmd) {
1841 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1842 goto inflight_alloc_failed;
1843 }
1844 adapter->login_buf = login_buffer;
1845 adapter->login_buf_token = buffer_token;
1846 adapter->login_buf_sz = buffer_size;
1847 adapter->login_rsp_buf = login_rsp_buffer;
1848 adapter->login_rsp_buf_token = rsp_buffer_token;
1849 adapter->login_rsp_buf_sz = rsp_buffer_size;
1850
1851 login_buffer->len = cpu_to_be32(buffer_size);
1852 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1853 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1854 login_buffer->off_txcomp_subcrqs =
1855 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1856 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1857 login_buffer->off_rxcomp_subcrqs =
1858 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1859 sizeof(u64) * adapter->req_tx_queues);
1860 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1861 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1862
1863 tx_list_p = (__be64 *)((char *)login_buffer +
1864 sizeof(struct ibmvnic_login_buffer));
1865 rx_list_p = (__be64 *)((char *)login_buffer +
1866 sizeof(struct ibmvnic_login_buffer) +
1867 sizeof(u64) * adapter->req_tx_queues);
1868
1869 for (i = 0; i < adapter->req_tx_queues; i++) {
1870 if (adapter->tx_scrq[i]) {
1871 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1872 crq_num);
1873 }
1874 }
1875
1876 for (i = 0; i < adapter->req_rx_queues; i++) {
1877 if (adapter->rx_scrq[i]) {
1878 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1879 crq_num);
1880 }
1881 }
1882
1883 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1884 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1885 netdev_dbg(adapter->netdev, "%016lx\n",
1886 ((unsigned long int *)(adapter->login_buf))[i]);
1887 }
1888
1889 memset(&crq, 0, sizeof(crq));
1890 crq.login.first = IBMVNIC_CRQ_CMD;
1891 crq.login.cmd = LOGIN;
1892 crq.login.ioba = cpu_to_be32(buffer_token);
1893 crq.login.len = cpu_to_be32(buffer_size);
1894
1895 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1896
1897 spin_lock_irqsave(&adapter->inflight_lock, flags);
1898 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1899 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1900
1901 ibmvnic_send_crq(adapter, &crq);
1902
1903 return;
1904
1905inflight_alloc_failed:
1906 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1907 DMA_FROM_DEVICE);
1908buf_rsp_map_failed:
1909 kfree(login_rsp_buffer);
1910buf_rsp_alloc_failed:
1911 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1912buf_map_failed:
1913 kfree(login_buffer);
1914buf_alloc_failed:
1915 return;
1916}
1917
1918static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1919 u32 len, u8 map_id)
1920{
1921 union ibmvnic_crq crq;
1922
1923 memset(&crq, 0, sizeof(crq));
1924 crq.request_map.first = IBMVNIC_CRQ_CMD;
1925 crq.request_map.cmd = REQUEST_MAP;
1926 crq.request_map.map_id = map_id;
1927 crq.request_map.ioba = cpu_to_be32(addr);
1928 crq.request_map.len = cpu_to_be32(len);
1929 ibmvnic_send_crq(adapter, &crq);
1930}
1931
1932static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1933{
1934 union ibmvnic_crq crq;
1935
1936 memset(&crq, 0, sizeof(crq));
1937 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1938 crq.request_unmap.cmd = REQUEST_UNMAP;
1939 crq.request_unmap.map_id = map_id;
1940 ibmvnic_send_crq(adapter, &crq);
1941}
1942
1943static void send_map_query(struct ibmvnic_adapter *adapter)
1944{
1945 union ibmvnic_crq crq;
1946
1947 memset(&crq, 0, sizeof(crq));
1948 crq.query_map.first = IBMVNIC_CRQ_CMD;
1949 crq.query_map.cmd = QUERY_MAP;
1950 ibmvnic_send_crq(adapter, &crq);
1951}
1952
1953/* Send a series of CRQs requesting various capabilities of the VNIC server */
1954static void send_cap_queries(struct ibmvnic_adapter *adapter)
1955{
1956 union ibmvnic_crq crq;
1957
1958 atomic_set(&adapter->running_cap_queries, 0);
1959 memset(&crq, 0, sizeof(crq));
1960 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1961 crq.query_capability.cmd = QUERY_CAPABILITY;
1962
1963 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1964 atomic_inc(&adapter->running_cap_queries);
1965 ibmvnic_send_crq(adapter, &crq);
1966
1967 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1968 atomic_inc(&adapter->running_cap_queries);
1969 ibmvnic_send_crq(adapter, &crq);
1970
1971 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1972 atomic_inc(&adapter->running_cap_queries);
1973 ibmvnic_send_crq(adapter, &crq);
1974
1975 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1976 atomic_inc(&adapter->running_cap_queries);
1977 ibmvnic_send_crq(adapter, &crq);
1978
1979 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1980 atomic_inc(&adapter->running_cap_queries);
1981 ibmvnic_send_crq(adapter, &crq);
1982
1983 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1984 atomic_inc(&adapter->running_cap_queries);
1985 ibmvnic_send_crq(adapter, &crq);
1986
1987 crq.query_capability.capability =
1988 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1989 atomic_inc(&adapter->running_cap_queries);
1990 ibmvnic_send_crq(adapter, &crq);
1991
1992 crq.query_capability.capability =
1993 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1994 atomic_inc(&adapter->running_cap_queries);
1995 ibmvnic_send_crq(adapter, &crq);
1996
1997 crq.query_capability.capability =
1998 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
1999 atomic_inc(&adapter->running_cap_queries);
2000 ibmvnic_send_crq(adapter, &crq);
2001
2002 crq.query_capability.capability =
2003 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2004 atomic_inc(&adapter->running_cap_queries);
2005 ibmvnic_send_crq(adapter, &crq);
2006
2007 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2008 atomic_inc(&adapter->running_cap_queries);
2009 ibmvnic_send_crq(adapter, &crq);
2010
2011 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2012 atomic_inc(&adapter->running_cap_queries);
2013 ibmvnic_send_crq(adapter, &crq);
2014
2015 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2016 atomic_inc(&adapter->running_cap_queries);
2017 ibmvnic_send_crq(adapter, &crq);
2018
2019 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2020 atomic_inc(&adapter->running_cap_queries);
2021 ibmvnic_send_crq(adapter, &crq);
2022
2023 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2024 atomic_inc(&adapter->running_cap_queries);
2025 ibmvnic_send_crq(adapter, &crq);
2026
2027 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2028 atomic_inc(&adapter->running_cap_queries);
2029 ibmvnic_send_crq(adapter, &crq);
2030
2031 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2032 atomic_inc(&adapter->running_cap_queries);
2033 ibmvnic_send_crq(adapter, &crq);
2034
2035 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2036 atomic_inc(&adapter->running_cap_queries);
2037 ibmvnic_send_crq(adapter, &crq);
2038
2039 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2040 atomic_inc(&adapter->running_cap_queries);
2041 ibmvnic_send_crq(adapter, &crq);
2042
2043 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2044 atomic_inc(&adapter->running_cap_queries);
2045 ibmvnic_send_crq(adapter, &crq);
2046
2047 crq.query_capability.capability =
2048 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2049 atomic_inc(&adapter->running_cap_queries);
2050 ibmvnic_send_crq(adapter, &crq);
2051
2052 crq.query_capability.capability =
2053 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2054 atomic_inc(&adapter->running_cap_queries);
2055 ibmvnic_send_crq(adapter, &crq);
2056
2057 crq.query_capability.capability =
2058 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2059 atomic_inc(&adapter->running_cap_queries);
2060 ibmvnic_send_crq(adapter, &crq);
2061
2062 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2063 atomic_inc(&adapter->running_cap_queries);
2064 ibmvnic_send_crq(adapter, &crq);
2065}
2066
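/* Process the QUERY_IP_OFFLOAD response: log the checksum and large
 * send/receive capabilities the server advertises, fold the supported
 * checksum offloads into the netdev feature flags, and reply with a
 * CONTROL_IP_OFFLOAD request enabling that subset.
 */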
2067static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2068{
2069 struct device *dev = &adapter->vdev->dev;
2070 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2071 union ibmvnic_crq crq;
2072 int i;
2073
2074 dma_unmap_single(dev, adapter->ip_offload_tok,
2075 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2076
2077 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2078 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2079 netdev_dbg(adapter->netdev, "%016lx\n",
2080 ((unsigned long int *)(buf))[i]);
2081
2082 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2083 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2084 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2085 buf->tcp_ipv4_chksum);
2086 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2087 buf->tcp_ipv6_chksum);
2088 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2089 buf->udp_ipv4_chksum);
2090 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2091 buf->udp_ipv6_chksum);
2092 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2093 buf->large_tx_ipv4);
2094 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2095 buf->large_tx_ipv6);
2096 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2097 buf->large_rx_ipv4);
2098 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2099 buf->large_rx_ipv6);
2100 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2101 buf->max_ipv4_header_size);
2102 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2103 buf->max_ipv6_header_size);
2104 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2105 buf->max_tcp_header_size);
2106 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2107 buf->max_udp_header_size);
2108 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2109 buf->max_large_tx_size);
2110 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2111 buf->max_large_rx_size);
2112 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2113 buf->ipv6_extension_header);
2114 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2115 buf->tcp_pseudosum_req);
2116 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2117 buf->num_ipv6_ext_headers);
2118 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2119 buf->off_ipv6_ext_headers);
2120
2121 adapter->ip_offload_ctrl_tok =
2122 dma_map_single(dev, &adapter->ip_offload_ctrl,
2123 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2124
2125 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2126 dev_err(dev, "Couldn't map ip offload control buffer\n");
2127 return;
2128 }
2129
2130 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2131 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2132 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2133 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2134 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2135
2136 /* large_tx/rx disabled for now, additional features needed */
2137 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2138 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2139 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2140 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2141
2142 adapter->netdev->features = NETIF_F_GSO;
2143
2144 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2145 adapter->netdev->features |= NETIF_F_IP_CSUM;
2146
2147 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2148 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2149
9be02cdf
TF
2150 if ((adapter->netdev->features &
2151 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2152 adapter->netdev->features |= NETIF_F_RXCSUM;
2153
032c5e82
TF
2154 memset(&crq, 0, sizeof(crq));
2155 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2156 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2157 crq.control_ip_offload.len =
2158 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2159 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2160 ibmvnic_send_crq(adapter, &crq);
2161}
2162
2163static void handle_error_info_rsp(union ibmvnic_crq *crq,
2164 struct ibmvnic_adapter *adapter)
2165{
2166 struct device *dev = &adapter->vdev->dev;
96183182 2167 struct ibmvnic_error_buff *error_buff, *tmp;
032c5e82
TF
2168 unsigned long flags;
2169 bool found = false;
2170 int i;
2171
2172 	if (crq->request_error_rsp.rc.code) {
2173 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2174 crq->request_error_rsp.rc.code);
2175 return;
2176 }
2177
2178 spin_lock_irqsave(&adapter->error_list_lock, flags);
96183182 2179 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
032c5e82
TF
2180 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2181 found = true;
2182 list_del(&error_buff->list);
2183 break;
2184 }
2185 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2186
2187 if (!found) {
2188 dev_err(dev, "Couldn't find error id %x\n",
2189 crq->request_error_rsp.error_id);
2190 return;
2191 }
2192
2193 dev_err(dev, "Detailed info for error id %x:",
2194 crq->request_error_rsp.error_id);
2195
2196 for (i = 0; i < error_buff->len; i++) {
2197 pr_cont("%02x", (int)error_buff->buff[i]);
2198 if (i % 8 == 7)
2199 pr_cont(" ");
2200 }
2201 pr_cont("\n");
2202
2203 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2204 DMA_FROM_DEVICE);
2205 kfree(error_buff->buff);
2206 kfree(error_buff);
2207}
2208
2209static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2210 struct ibmvnic_adapter *adapter)
2211{
2212 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2213 struct ibmvnic_inflight_cmd *inflight_cmd;
2214 struct device *dev = &adapter->vdev->dev;
2215 union ibmvnic_crq newcrq;
2216 unsigned long flags;
2217
2218 /* allocate and map buffer */
2219 	adapter->dump_data = kmalloc(len, GFP_ATOMIC);
2220 if (!adapter->dump_data) {
2221 complete(&adapter->fw_done);
2222 return;
2223 }
2224
2225 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2226 DMA_FROM_DEVICE);
2227
2228 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2229 if (!firmware_has_feature(FW_FEATURE_CMO))
2230 dev_err(dev, "Couldn't map dump data\n");
2231 kfree(adapter->dump_data);
2232 complete(&adapter->fw_done);
2233 return;
2234 }
2235
2236 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2237 if (!inflight_cmd) {
2238 dma_unmap_single(dev, adapter->dump_data_token, len,
2239 DMA_FROM_DEVICE);
2240 kfree(adapter->dump_data);
2241 complete(&adapter->fw_done);
2242 return;
2243 }
2244
2245 memset(&newcrq, 0, sizeof(newcrq));
2246 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2247 newcrq.request_dump.cmd = REQUEST_DUMP;
2248 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2249 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2250
2251 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2252
2253 spin_lock_irqsave(&adapter->inflight_lock, flags);
2254 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2255 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2256
2257 ibmvnic_send_crq(adapter, &newcrq);
2258}
2259
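/* An ERROR_INDICATION only carries an error id and detail size. Allocate
 * and map a detail buffer, remember it on the adapter's error list, and ask
 * the server to fill it in with a REQUEST_ERROR_INFO command.
 */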
2260static void handle_error_indication(union ibmvnic_crq *crq,
2261 struct ibmvnic_adapter *adapter)
2262{
2263 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2264 struct ibmvnic_inflight_cmd *inflight_cmd;
2265 struct device *dev = &adapter->vdev->dev;
2266 struct ibmvnic_error_buff *error_buff;
2267 union ibmvnic_crq new_crq;
2268 unsigned long flags;
2269
2270 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2271 crq->error_indication.
2272 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2273 crq->error_indication.error_id,
2274 crq->error_indication.error_cause);
2275
2276 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2277 if (!error_buff)
2278 return;
2279
2280 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2281 if (!error_buff->buff) {
2282 kfree(error_buff);
2283 return;
2284 }
2285
2286 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2287 DMA_FROM_DEVICE);
2288 if (dma_mapping_error(dev, error_buff->dma)) {
2289 if (!firmware_has_feature(FW_FEATURE_CMO))
2290 dev_err(dev, "Couldn't map error buffer\n");
2291 kfree(error_buff->buff);
2292 kfree(error_buff);
2293 return;
2294 }
2295
2296 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2297 if (!inflight_cmd) {
2298 dma_unmap_single(dev, error_buff->dma, detail_len,
2299 DMA_FROM_DEVICE);
2300 kfree(error_buff->buff);
2301 kfree(error_buff);
2302 return;
2303 }
2304
2305 error_buff->len = detail_len;
2306 error_buff->error_id = crq->error_indication.error_id;
2307
2308 spin_lock_irqsave(&adapter->error_list_lock, flags);
2309 list_add_tail(&error_buff->list, &adapter->errors);
2310 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2311
2312 memset(&new_crq, 0, sizeof(new_crq));
2313 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2314 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2315 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2316 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2317 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2318
2319 	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2320
2321 spin_lock_irqsave(&adapter->inflight_lock, flags);
2322 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2323 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2324
2325 ibmvnic_send_crq(adapter, &new_crq);
2326}
2327
2328static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2329 struct ibmvnic_adapter *adapter)
2330{
2331 struct net_device *netdev = adapter->netdev;
2332 struct device *dev = &adapter->vdev->dev;
2333 long rc;
2334
2335 rc = crq->change_mac_addr_rsp.rc.code;
2336 if (rc) {
2337 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2338 return;
2339 }
2340 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2341 ETH_ALEN);
2342}
2343
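/* Handle a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server offers
 * a smaller value, so the sub-CRQs are released and renegotiated with the
 * returned number. Once all seven requested capabilities are acknowledged,
 * the IP offload query is sent.
 */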
2344static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2345 struct ibmvnic_adapter *adapter)
2346{
2347 struct device *dev = &adapter->vdev->dev;
2348 u64 *req_value;
2349 char *name;
2350
2351 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2352 case REQ_TX_QUEUES:
2353 req_value = &adapter->req_tx_queues;
2354 name = "tx";
2355 break;
2356 case REQ_RX_QUEUES:
2357 req_value = &adapter->req_rx_queues;
2358 name = "rx";
2359 break;
2360 case REQ_RX_ADD_QUEUES:
2361 req_value = &adapter->req_rx_add_queues;
2362 name = "rx_add";
2363 break;
2364 case REQ_TX_ENTRIES_PER_SUBCRQ:
2365 req_value = &adapter->req_tx_entries_per_subcrq;
2366 name = "tx_entries_per_subcrq";
2367 break;
2368 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2369 req_value = &adapter->req_rx_add_entries_per_subcrq;
2370 name = "rx_add_entries_per_subcrq";
2371 break;
2372 case REQ_MTU:
2373 req_value = &adapter->req_mtu;
2374 name = "mtu";
2375 break;
2376 case PROMISC_REQUESTED:
2377 req_value = &adapter->promisc;
2378 name = "promisc";
2379 break;
2380 default:
2381 dev_err(dev, "Got invalid cap request rsp %d\n",
2382 crq->request_capability.capability);
2383 return;
2384 }
2385
2386 switch (crq->request_capability_rsp.rc.code) {
2387 case SUCCESS:
2388 break;
2389 case PARTIALSUCCESS:
2390 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2391 *req_value,
28f4d165 2392 (long int)be64_to_cpu(crq->request_capability_rsp.
032c5e82 2393 number), name);
ea22d51a 2394 release_sub_crqs_no_irqs(adapter);
28f4d165 2395 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
ea22d51a 2396 init_sub_crqs(adapter, 1);
032c5e82
TF
2397 return;
2398 default:
2399 dev_err(dev, "Error %d in request cap rsp\n",
2400 crq->request_capability_rsp.rc.code);
2401 return;
2402 }
2403
2404 /* Done receiving requested capabilities, query IP offload support */
2405 if (++adapter->requested_caps == 7) {
2406 union ibmvnic_crq newcrq;
2407 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2408 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2409 &adapter->ip_offload_buf;
2410
2411 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2412 buf_sz,
2413 DMA_FROM_DEVICE);
2414
2415 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2416 if (!firmware_has_feature(FW_FEATURE_CMO))
2417 dev_err(dev, "Couldn't map offload buffer\n");
2418 return;
2419 }
2420
2421 memset(&newcrq, 0, sizeof(newcrq));
2422 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2423 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2424 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2425 newcrq.query_ip_offload.ioba =
2426 cpu_to_be32(adapter->ip_offload_tok);
2427
2428 ibmvnic_send_crq(adapter, &newcrq);
2429 }
2430}
2431
2432static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2433 struct ibmvnic_adapter *adapter)
2434{
2435 struct device *dev = &adapter->vdev->dev;
2436 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2437 struct ibmvnic_login_buffer *login = adapter->login_buf;
2438 union ibmvnic_crq crq;
2439 int i;
2440
2441 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2442 DMA_BIDIRECTIONAL);
2443 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2444 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2445
498cd8e4
JA
2446 /* If the number of queues requested can't be allocated by the
2447 * server, the login response will return with code 1. We will need
2448 * to resend the login buffer with fewer queues requested.
2449 */
2450 if (login_rsp_crq->generic.rc.code) {
2451 adapter->renegotiate = true;
2452 complete(&adapter->init_done);
2453 return 0;
2454 }
2455
032c5e82
TF
2456 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2457 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2458 netdev_dbg(adapter->netdev, "%016lx\n",
2459 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2460 }
2461
2462 /* Sanity checks */
2463 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2464 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2465 adapter->req_rx_add_queues !=
2466 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2467 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2468 ibmvnic_remove(adapter->vdev);
2469 return -EIO;
2470 }
2471 complete(&adapter->init_done);
2472
2473 memset(&crq, 0, sizeof(crq));
2474 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2475 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2476 ibmvnic_send_crq(adapter, &crq);
2477
2478 return 0;
2479}
2480
2481static void handle_request_map_rsp(union ibmvnic_crq *crq,
2482 struct ibmvnic_adapter *adapter)
2483{
2484 struct device *dev = &adapter->vdev->dev;
2485 u8 map_id = crq->request_map_rsp.map_id;
2486 int tx_subcrqs;
2487 int rx_subcrqs;
2488 long rc;
2489 int i;
2490
2491 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2492 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2493
2494 rc = crq->request_map_rsp.rc.code;
2495 if (rc) {
2496 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2497 adapter->map_id--;
2498 /* need to find and zero tx/rx_pool map_id */
2499 for (i = 0; i < tx_subcrqs; i++) {
2500 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2501 adapter->tx_pool[i].long_term_buff.map_id = 0;
2502 }
2503 for (i = 0; i < rx_subcrqs; i++) {
2504 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2505 adapter->rx_pool[i].long_term_buff.map_id = 0;
2506 }
2507 }
2508 complete(&adapter->fw_done);
2509}
2510
2511static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2512 struct ibmvnic_adapter *adapter)
2513{
2514 struct device *dev = &adapter->vdev->dev;
2515 long rc;
2516
2517 rc = crq->request_unmap_rsp.rc.code;
2518 if (rc)
2519 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2520}
2521
2522static void handle_query_map_rsp(union ibmvnic_crq *crq,
2523 struct ibmvnic_adapter *adapter)
2524{
2525 struct net_device *netdev = adapter->netdev;
2526 struct device *dev = &adapter->vdev->dev;
2527 long rc;
2528
2529 rc = crq->query_map_rsp.rc.code;
2530 if (rc) {
2531 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2532 return;
2533 }
2534 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2535 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2536 crq->query_map_rsp.free_pages);
2537}
2538
2539static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2540 struct ibmvnic_adapter *adapter)
2541{
2542 struct net_device *netdev = adapter->netdev;
2543 struct device *dev = &adapter->vdev->dev;
2544 long rc;
2545
2546 atomic_dec(&adapter->running_cap_queries);
2547 netdev_dbg(netdev, "Outstanding queries: %d\n",
2548 atomic_read(&adapter->running_cap_queries));
2549 rc = crq->query_capability.rc.code;
2550 if (rc) {
2551 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2552 goto out;
2553 }
2554
2555 switch (be16_to_cpu(crq->query_capability.capability)) {
2556 case MIN_TX_QUEUES:
2557 adapter->min_tx_queues =
de89e854 2558 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2559 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2560 adapter->min_tx_queues);
2561 break;
2562 case MIN_RX_QUEUES:
2563 adapter->min_rx_queues =
de89e854 2564 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2565 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2566 adapter->min_rx_queues);
2567 break;
2568 case MIN_RX_ADD_QUEUES:
2569 adapter->min_rx_add_queues =
de89e854 2570 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2571 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2572 adapter->min_rx_add_queues);
2573 break;
2574 case MAX_TX_QUEUES:
2575 adapter->max_tx_queues =
de89e854 2576 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2577 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2578 adapter->max_tx_queues);
2579 break;
2580 case MAX_RX_QUEUES:
2581 adapter->max_rx_queues =
de89e854 2582 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2583 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2584 adapter->max_rx_queues);
2585 break;
2586 case MAX_RX_ADD_QUEUES:
2587 adapter->max_rx_add_queues =
de89e854 2588 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2589 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2590 adapter->max_rx_add_queues);
2591 break;
2592 case MIN_TX_ENTRIES_PER_SUBCRQ:
2593 adapter->min_tx_entries_per_subcrq =
de89e854 2594 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2595 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2596 adapter->min_tx_entries_per_subcrq);
2597 break;
2598 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2599 adapter->min_rx_add_entries_per_subcrq =
de89e854 2600 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2601 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2602 adapter->min_rx_add_entries_per_subcrq);
2603 break;
2604 case MAX_TX_ENTRIES_PER_SUBCRQ:
2605 adapter->max_tx_entries_per_subcrq =
de89e854 2606 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2607 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2608 adapter->max_tx_entries_per_subcrq);
2609 break;
2610 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2611 adapter->max_rx_add_entries_per_subcrq =
de89e854 2612 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2613 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2614 adapter->max_rx_add_entries_per_subcrq);
2615 break;
2616 case TCP_IP_OFFLOAD:
2617 adapter->tcp_ip_offload =
de89e854 2618 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2619 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2620 adapter->tcp_ip_offload);
2621 break;
2622 case PROMISC_SUPPORTED:
2623 adapter->promisc_supported =
de89e854 2624 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2625 netdev_dbg(netdev, "promisc_supported = %lld\n",
2626 adapter->promisc_supported);
2627 break;
2628 case MIN_MTU:
de89e854 2629 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
f39f0d1e 2630 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
032c5e82
TF
2631 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2632 break;
2633 case MAX_MTU:
de89e854 2634 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
f39f0d1e 2635 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
032c5e82
TF
2636 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2637 break;
2638 case MAX_MULTICAST_FILTERS:
2639 adapter->max_multicast_filters =
de89e854 2640 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2641 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2642 adapter->max_multicast_filters);
2643 break;
2644 case VLAN_HEADER_INSERTION:
2645 adapter->vlan_header_insertion =
de89e854 2646 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2647 if (adapter->vlan_header_insertion)
2648 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2649 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2650 adapter->vlan_header_insertion);
2651 break;
2652 case MAX_TX_SG_ENTRIES:
2653 adapter->max_tx_sg_entries =
de89e854 2654 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2655 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2656 adapter->max_tx_sg_entries);
2657 break;
2658 case RX_SG_SUPPORTED:
2659 adapter->rx_sg_supported =
de89e854 2660 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2661 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2662 adapter->rx_sg_supported);
2663 break;
2664 case OPT_TX_COMP_SUB_QUEUES:
2665 adapter->opt_tx_comp_sub_queues =
de89e854 2666 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2667 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2668 adapter->opt_tx_comp_sub_queues);
2669 break;
2670 case OPT_RX_COMP_QUEUES:
2671 adapter->opt_rx_comp_queues =
de89e854 2672 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2673 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2674 adapter->opt_rx_comp_queues);
2675 break;
2676 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2677 adapter->opt_rx_bufadd_q_per_rx_comp_q =
de89e854 2678 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2679 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2680 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2681 break;
2682 case OPT_TX_ENTRIES_PER_SUBCRQ:
2683 adapter->opt_tx_entries_per_subcrq =
de89e854 2684 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2685 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2686 adapter->opt_tx_entries_per_subcrq);
2687 break;
2688 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2689 adapter->opt_rxba_entries_per_subcrq =
de89e854 2690 be64_to_cpu(crq->query_capability.number);
032c5e82
TF
2691 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2692 adapter->opt_rxba_entries_per_subcrq);
2693 break;
2694 case TX_RX_DESC_REQ:
2695 adapter->tx_rx_desc_req = crq->query_capability.number;
2696 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2697 adapter->tx_rx_desc_req);
2698 break;
2699
2700 default:
2701 netdev_err(netdev, "Got invalid cap rsp %d\n",
2702 crq->query_capability.capability);
2703 }
2704
2705out:
2706 if (atomic_read(&adapter->running_cap_queries) == 0)
ea22d51a 2707 init_sub_crqs(adapter, 0);
032c5e82
TF
2708 /* We're done querying the capabilities, initialize sub-crqs */
2709}
2710
2711static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2712 struct ibmvnic_adapter *adapter)
2713{
2714 u8 correlator = crq->control_ras_rsp.correlator;
2715 struct device *dev = &adapter->vdev->dev;
2716 bool found = false;
2717 int i;
2718
2719 if (crq->control_ras_rsp.rc.code) {
2720 dev_warn(dev, "Control ras failed rc=%d\n",
2721 crq->control_ras_rsp.rc.code);
2722 return;
2723 }
2724
2725 for (i = 0; i < adapter->ras_comp_num; i++) {
2726 if (adapter->ras_comps[i].correlator == correlator) {
2727 found = true;
2728 break;
2729 }
2730 }
2731
2732 if (!found) {
2733 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2734 return;
2735 }
2736
2737 switch (crq->control_ras_rsp.op) {
2738 case IBMVNIC_TRACE_LEVEL:
2739 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2740 break;
2741 case IBMVNIC_ERROR_LEVEL:
2742 adapter->ras_comps[i].error_check_level =
2743 crq->control_ras.level;
2744 break;
2745 case IBMVNIC_TRACE_PAUSE:
2746 adapter->ras_comp_int[i].paused = 1;
2747 break;
2748 case IBMVNIC_TRACE_RESUME:
2749 adapter->ras_comp_int[i].paused = 0;
2750 break;
2751 case IBMVNIC_TRACE_ON:
2752 adapter->ras_comps[i].trace_on = 1;
2753 break;
2754 case IBMVNIC_TRACE_OFF:
2755 adapter->ras_comps[i].trace_on = 0;
2756 break;
2757 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2758 /* trace_buff_sz is 3 bytes, stuff it into an int */
2759 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2760 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2761 crq->control_ras_rsp.trace_buff_sz[0];
2762 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2763 crq->control_ras_rsp.trace_buff_sz[1];
2764 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2765 crq->control_ras_rsp.trace_buff_sz[2];
2766 break;
2767 default:
2768 dev_err(dev, "invalid op %d on control_ras_rsp",
2769 crq->control_ras_rsp.op);
2770 }
2771}
2772
032c5e82
TF
2773static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2774 loff_t *ppos)
2775{
2776 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2777 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2778 struct device *dev = &adapter->vdev->dev;
2779 struct ibmvnic_fw_trace_entry *trace;
2780 int num = ras_comp_int->num;
2781 union ibmvnic_crq crq;
2782 dma_addr_t trace_tok;
2783
2784 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2785 return 0;
2786
2787 trace =
2788 dma_alloc_coherent(dev,
2789 be32_to_cpu(adapter->ras_comps[num].
2790 trace_buff_size), &trace_tok,
2791 GFP_KERNEL);
2792 if (!trace) {
2793 dev_err(dev, "Couldn't alloc trace buffer\n");
2794 return 0;
2795 }
2796
2797 memset(&crq, 0, sizeof(crq));
2798 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2799 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2800 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2801 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2802 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
032c5e82
TF
2803
2804 init_completion(&adapter->fw_done);
db5d0b59 2805 ibmvnic_send_crq(adapter, &crq);
032c5e82
TF
2806 wait_for_completion(&adapter->fw_done);
2807
2808 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2809 len =
2810 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2811 *ppos;
2812
2813 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2814
2815 dma_free_coherent(dev,
2816 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2817 trace, trace_tok);
2818 *ppos += len;
2819 return len;
2820}
2821
2822static const struct file_operations trace_ops = {
2823 .owner = THIS_MODULE,
7a95e94c 2824 .open = simple_open,
032c5e82
TF
2825 .read = trace_read,
2826};
2827
2828static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2829 loff_t *ppos)
2830{
2831 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2832 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2833 int num = ras_comp_int->num;
2834 char buff[5]; /* 1 or 0 plus \n and \0 */
2835 int size;
2836
2837 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2838
2839 if (*ppos >= size)
2840 return 0;
2841
2842 copy_to_user(user_buf, buff, size);
2843 *ppos += size;
2844 return size;
2845}
2846
2847static ssize_t paused_write(struct file *file, const char __user *user_buf,
2848 size_t len, loff_t *ppos)
2849{
2850 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2851 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2852 int num = ras_comp_int->num;
2853 union ibmvnic_crq crq;
2854 unsigned long val;
2855 char buff[9]; /* decimal max int plus \n and \0 */
2856
2857 	copy_from_user(buff, user_buf, sizeof(buff));
2858 	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2859
2860 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2861
2862 memset(&crq, 0, sizeof(crq));
2863 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2864 crq.control_ras.cmd = CONTROL_RAS;
2865 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2866 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2867 ibmvnic_send_crq(adapter, &crq);
2868
2869 return len;
2870}
2871
2872static const struct file_operations paused_ops = {
2873 .owner = THIS_MODULE,
7a95e94c 2874 .open = simple_open,
032c5e82
TF
2875 .read = paused_read,
2876 .write = paused_write,
2877};
2878
2879static ssize_t tracing_read(struct file *file, char __user *user_buf,
2880 size_t len, loff_t *ppos)
2881{
2882 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2883 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2884 int num = ras_comp_int->num;
2885 char buff[5]; /* 1 or 0 plus \n and \0 */
2886 int size;
2887
2888 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2889
2890 if (*ppos >= size)
2891 return 0;
2892
2893 copy_to_user(user_buf, buff, size);
2894 *ppos += size;
2895 return size;
2896}
2897
2898static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2899 size_t len, loff_t *ppos)
2900{
2901 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2902 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2903 int num = ras_comp_int->num;
2904 union ibmvnic_crq crq;
2905 unsigned long val;
2906 char buff[9]; /* decimal max int plus \n and \0 */
2907
2908 	copy_from_user(buff, user_buf, sizeof(buff));
2909 	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2910
2911 memset(&crq, 0, sizeof(crq));
2912 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2913 crq.control_ras.cmd = CONTROL_RAS;
2914 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2915 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
2916 	ibmvnic_send_crq(adapter, &crq);

2917 return len;
2918}
2919
2920static const struct file_operations tracing_ops = {
2921 .owner = THIS_MODULE,
7a95e94c 2922 .open = simple_open,
032c5e82
TF
2923 .read = tracing_read,
2924 .write = tracing_write,
2925};
2926
2927static ssize_t error_level_read(struct file *file, char __user *user_buf,
2928 size_t len, loff_t *ppos)
2929{
2930 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2931 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2932 int num = ras_comp_int->num;
2933 char buff[5]; /* decimal max char plus \n and \0 */
2934 int size;
2935
2936 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2937
2938 if (*ppos >= size)
2939 return 0;
2940
2941 copy_to_user(user_buf, buff, size);
2942 *ppos += size;
2943 return size;
2944}
2945
2946static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2947 size_t len, loff_t *ppos)
2948{
2949 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2950 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2951 int num = ras_comp_int->num;
2952 union ibmvnic_crq crq;
2953 unsigned long val;
2954 char buff[9]; /* decimal max int plus \n and \0 */
2955
2956 	copy_from_user(buff, user_buf, sizeof(buff));
2957 	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2958
2959 if (val > 9)
2960 val = 9;
2961
2962 memset(&crq, 0, sizeof(crq));
2963 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2964 crq.control_ras.cmd = CONTROL_RAS;
2965 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2966 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2967 crq.control_ras.level = val;
2968 ibmvnic_send_crq(adapter, &crq);
2969
2970 return len;
2971}
2972
2973static const struct file_operations error_level_ops = {
2974 .owner = THIS_MODULE,
7a95e94c 2975 .open = simple_open,
032c5e82
TF
2976 .read = error_level_read,
2977 .write = error_level_write,
2978};
2979
2980static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2981 size_t len, loff_t *ppos)
2982{
2983 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2984 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2985 int num = ras_comp_int->num;
2986 char buff[5]; /* decimal max char plus \n and \0 */
2987 int size;
2988
2989 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2990 if (*ppos >= size)
2991 return 0;
2992
2993 copy_to_user(user_buf, buff, size);
2994 *ppos += size;
2995 return size;
2996}
2997
2998static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
2999 size_t len, loff_t *ppos)
3000{
3001 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3002 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3003 union ibmvnic_crq crq;
3004 unsigned long val;
3005 char buff[9]; /* decimal max int plus \n and \0 */
3006
3007 	copy_from_user(buff, user_buf, sizeof(buff));
3008 	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3009 if (val > 9)
3010 val = 9;
3011
3012 memset(&crq, 0, sizeof(crq));
3013 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3014 crq.control_ras.cmd = CONTROL_RAS;
3015 crq.control_ras.correlator =
3016 adapter->ras_comps[ras_comp_int->num].correlator;
3017 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3018 crq.control_ras.level = val;
3019 ibmvnic_send_crq(adapter, &crq);
3020
3021 return len;
3022}
3023
3024static const struct file_operations trace_level_ops = {
3025 .owner = THIS_MODULE,
7a95e94c 3026 .open = simple_open,
032c5e82
TF
3027 .read = trace_level_read,
3028 .write = trace_level_write,
3029};
3030
3031static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3032 size_t len, loff_t *ppos)
3033{
3034 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3035 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3036 int num = ras_comp_int->num;
3037 char buff[9]; /* decimal max int plus \n and \0 */
3038 int size;
3039
3040 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
3041 if (*ppos >= size)
3042 return 0;
3043
3044 copy_to_user(user_buf, buff, size);
3045 *ppos += size;
3046 return size;
3047}
3048
3049static ssize_t trace_buff_size_write(struct file *file,
3050 const char __user *user_buf, size_t len,
3051 loff_t *ppos)
3052{
3053 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3054 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3055 union ibmvnic_crq crq;
3056 unsigned long val;
3057 char buff[9]; /* decimal max int plus \n and \0 */
3058
3059 	copy_from_user(buff, user_buf, sizeof(buff));
3060 	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3061
3062 memset(&crq, 0, sizeof(crq));
3063 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3064 crq.control_ras.cmd = CONTROL_RAS;
3065 crq.control_ras.correlator =
3066 adapter->ras_comps[ras_comp_int->num].correlator;
3067 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3068 /* trace_buff_sz is 3 bytes, stuff an int into it */
3069 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3070 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3071 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3072 ibmvnic_send_crq(adapter, &crq);
3073
3074 return len;
3075}
3076
3077static const struct file_operations trace_size_ops = {
3078 .owner = THIS_MODULE,
7a95e94c 3079 .open = simple_open,
032c5e82
TF
3080 .read = trace_buff_size_read,
3081 .write = trace_buff_size_write,
3082};
3083
3084static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3085 struct ibmvnic_adapter *adapter)
3086{
3087 struct device *dev = &adapter->vdev->dev;
3088 struct dentry *dir_ent;
3089 struct dentry *ent;
3090 int i;
3091
3092 debugfs_remove_recursive(adapter->ras_comps_ent);
3093
3094 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3095 adapter->debugfs_dir);
3096 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3097 dev_info(dev, "debugfs create ras_comps dir failed\n");
3098 return;
3099 }
3100
3101 for (i = 0; i < adapter->ras_comp_num; i++) {
3102 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3103 adapter->ras_comps_ent);
3104 if (!dir_ent || IS_ERR(dir_ent)) {
3105 dev_info(dev, "debugfs create %s dir failed\n",
3106 adapter->ras_comps[i].name);
3107 continue;
3108 }
3109
3110 adapter->ras_comp_int[i].adapter = adapter;
3111 adapter->ras_comp_int[i].num = i;
3112 adapter->ras_comp_int[i].desc_blob.data =
3113 &adapter->ras_comps[i].description;
3114 adapter->ras_comp_int[i].desc_blob.size =
3115 sizeof(adapter->ras_comps[i].description);
3116
3117 /* Don't need to remember the dentry's because the debugfs dir
3118 * gets removed recursively
3119 */
3120 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3121 &adapter->ras_comp_int[i].desc_blob);
3122 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3123 dir_ent, &adapter->ras_comp_int[i],
3124 &trace_size_ops);
3125 ent = debugfs_create_file("trace_level",
3126 S_IRUGO |
3127 (adapter->ras_comps[i].trace_level !=
3128 0xFF ? S_IWUSR : 0),
3129 dir_ent, &adapter->ras_comp_int[i],
3130 &trace_level_ops);
3131 ent = debugfs_create_file("error_level",
3132 S_IRUGO |
3133 (adapter->
3134 ras_comps[i].error_check_level !=
3135 0xFF ? S_IWUSR : 0),
3136 dir_ent, &adapter->ras_comp_int[i],
3137 					  &error_level_ops);
3138 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3139 dir_ent, &adapter->ras_comp_int[i],
3140 &tracing_ops);
3141 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3142 dir_ent, &adapter->ras_comp_int[i],
3143 &paused_ops);
3144 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3145 &adapter->ras_comp_int[i],
3146 &trace_ops);
3147 }
3148}
3149
3150static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3151 struct ibmvnic_adapter *adapter)
3152{
3153 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3154 struct device *dev = &adapter->vdev->dev;
3155 union ibmvnic_crq newcrq;
3156
3157 adapter->ras_comps = dma_alloc_coherent(dev, len,
3158 &adapter->ras_comps_tok,
3159 GFP_KERNEL);
3160 if (!adapter->ras_comps) {
3161 if (!firmware_has_feature(FW_FEATURE_CMO))
3162 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3163 return;
3164 }
3165
3166 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3167 sizeof(struct ibmvnic_fw_comp_internal),
3168 GFP_KERNEL);
3169 	if (!adapter->ras_comp_int) {
3170 		dma_free_coherent(dev, len, adapter->ras_comps,
3171 				  adapter->ras_comps_tok);
		return;
	}
3172
3173 memset(&newcrq, 0, sizeof(newcrq));
3174 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3175 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3176 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3177 newcrq.request_ras_comps.len = cpu_to_be32(len);
3178 ibmvnic_send_crq(adapter, &newcrq);
3179}
3180
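/* Drop every command still on the inflight list, undoing its side effects:
 * unmap and free the login buffers, complete a pending firmware dump wait,
 * and release queued error detail buffers. Called from the transport event
 * path before the CRQ is re-registered.
 */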
3181static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3182{
96183182 3183 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
032c5e82 3184 struct device *dev = &adapter->vdev->dev;
96183182 3185 struct ibmvnic_error_buff *error_buff, *tmp2;
032c5e82
TF
3186 unsigned long flags;
3187 unsigned long flags2;
3188
3189 spin_lock_irqsave(&adapter->inflight_lock, flags);
96183182 3190 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
032c5e82
TF
3191 switch (inflight_cmd->crq.generic.cmd) {
3192 case LOGIN:
3193 dma_unmap_single(dev, adapter->login_buf_token,
3194 adapter->login_buf_sz,
3195 DMA_BIDIRECTIONAL);
3196 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3197 adapter->login_rsp_buf_sz,
3198 DMA_BIDIRECTIONAL);
3199 kfree(adapter->login_rsp_buf);
3200 kfree(adapter->login_buf);
3201 break;
3202 case REQUEST_DUMP:
3203 complete(&adapter->fw_done);
3204 break;
3205 case REQUEST_ERROR_INFO:
3206 spin_lock_irqsave(&adapter->error_list_lock, flags2);
96183182
WY
3207 list_for_each_entry_safe(error_buff, tmp2,
3208 &adapter->errors, list) {
032c5e82
TF
3209 dma_unmap_single(dev, error_buff->dma,
3210 error_buff->len,
3211 DMA_FROM_DEVICE);
3212 kfree(error_buff->buff);
3213 list_del(&error_buff->list);
3214 kfree(error_buff);
3215 }
3216 spin_unlock_irqrestore(&adapter->error_list_lock,
3217 flags2);
3218 break;
3219 }
3220 list_del(&inflight_cmd->list);
3221 kfree(inflight_cmd);
3222 }
3223 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3224}
3225
9888d7b0
TF
3226static void ibmvnic_xport_event(struct work_struct *work)
3227{
3228 struct ibmvnic_adapter *adapter = container_of(work,
3229 struct ibmvnic_adapter,
3230 ibmvnic_xport);
3231 struct device *dev = &adapter->vdev->dev;
3232 long rc;
3233
3234 ibmvnic_free_inflight(adapter);
3235 release_sub_crqs(adapter);
3236 if (adapter->migrated) {
3237 rc = ibmvnic_reenable_crq_queue(adapter);
3238 if (rc)
3239 dev_err(dev, "Error after enable rc=%ld\n", rc);
3240 adapter->migrated = false;
3241 rc = ibmvnic_send_crq_init(adapter);
3242 if (rc)
3243 dev_err(dev, "Error sending init rc=%ld\n", rc);
3244 }
3245}
3246
032c5e82
TF
3247static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3248 struct ibmvnic_adapter *adapter)
3249{
3250 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3251 struct net_device *netdev = adapter->netdev;
3252 struct device *dev = &adapter->vdev->dev;
3253 long rc;
3254
3255 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3256 ((unsigned long int *)crq)[0],
3257 ((unsigned long int *)crq)[1]);
3258 switch (gen_crq->first) {
3259 case IBMVNIC_CRQ_INIT_RSP:
3260 switch (gen_crq->cmd) {
3261 case IBMVNIC_CRQ_INIT:
3262 dev_info(dev, "Partner initialized\n");
3263 /* Send back a response */
3264 rc = ibmvnic_send_crq_init_complete(adapter);
65dc6891
TF
3265 if (!rc)
3266 schedule_work(&adapter->vnic_crq_init);
032c5e82
TF
3267 else
3268 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3269 break;
3270 case IBMVNIC_CRQ_INIT_COMPLETE:
3271 dev_info(dev, "Partner initialization complete\n");
3272 send_version_xchg(adapter);
3273 break;
3274 default:
3275 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3276 }
3277 return;
3278 case IBMVNIC_CRQ_XPORT_EVENT:
3279 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3280 dev_info(dev, "Re-enabling adapter\n");
3281 adapter->migrated = true;
9888d7b0 3282 schedule_work(&adapter->ibmvnic_xport);
dfad09a6
TF
3283 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3284 dev_info(dev, "Backing device failover detected\n");
3285 netif_carrier_off(netdev);
3286 adapter->failover = true;
032c5e82
TF
3287 } else {
3288 /* The adapter lost the connection */
3289 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3290 gen_crq->cmd);
9888d7b0 3291 schedule_work(&adapter->ibmvnic_xport);
032c5e82
TF
3292 }
3293 return;
3294 case IBMVNIC_CRQ_CMD_RSP:
3295 break;
3296 default:
3297 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3298 gen_crq->first);
3299 return;
3300 }
3301
3302 switch (gen_crq->cmd) {
3303 case VERSION_EXCHANGE_RSP:
3304 rc = crq->version_exchange_rsp.rc.code;
3305 if (rc) {
3306 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3307 break;
3308 }
3309 dev_info(dev, "Partner protocol version is %d\n",
3310 crq->version_exchange_rsp.version);
3311 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3312 ibmvnic_version)
3313 ibmvnic_version =
3314 be16_to_cpu(crq->version_exchange_rsp.version);
3315 send_cap_queries(adapter);
3316 break;
3317 case QUERY_CAPABILITY_RSP:
3318 handle_query_cap_rsp(crq, adapter);
3319 break;
3320 case QUERY_MAP_RSP:
3321 handle_query_map_rsp(crq, adapter);
3322 break;
3323 case REQUEST_MAP_RSP:
3324 handle_request_map_rsp(crq, adapter);
3325 break;
3326 case REQUEST_UNMAP_RSP:
3327 handle_request_unmap_rsp(crq, adapter);
3328 break;
3329 case REQUEST_CAPABILITY_RSP:
3330 handle_request_cap_rsp(crq, adapter);
3331 break;
3332 case LOGIN_RSP:
3333 netdev_dbg(netdev, "Got Login Response\n");
3334 handle_login_rsp(crq, adapter);
3335 break;
3336 case LOGICAL_LINK_STATE_RSP:
3337 netdev_dbg(netdev, "Got Logical Link State Response\n");
3338 adapter->logical_link_state =
3339 crq->logical_link_state_rsp.link_state;
3340 break;
3341 case LINK_STATE_INDICATION:
3342 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3343 adapter->phys_link_state =
3344 crq->link_state_indication.phys_link_state;
3345 adapter->logical_link_state =
3346 crq->link_state_indication.logical_link_state;
3347 break;
3348 case CHANGE_MAC_ADDR_RSP:
3349 netdev_dbg(netdev, "Got MAC address change Response\n");
3350 handle_change_mac_rsp(crq, adapter);
3351 break;
3352 case ERROR_INDICATION:
3353 netdev_dbg(netdev, "Got Error Indication\n");
3354 handle_error_indication(crq, adapter);
3355 break;
3356 case REQUEST_ERROR_RSP:
3357 netdev_dbg(netdev, "Got Error Detail Response\n");
3358 handle_error_info_rsp(crq, adapter);
3359 break;
3360 case REQUEST_STATISTICS_RSP:
3361 netdev_dbg(netdev, "Got Statistics Response\n");
3362 complete(&adapter->stats_done);
3363 break;
3364 case REQUEST_DUMP_SIZE_RSP:
3365 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3366 handle_dump_size_rsp(crq, adapter);
3367 break;
3368 case REQUEST_DUMP_RSP:
3369 netdev_dbg(netdev, "Got Request Dump Response\n");
3370 complete(&adapter->fw_done);
3371 break;
3372 case QUERY_IP_OFFLOAD_RSP:
3373 netdev_dbg(netdev, "Got Query IP offload Response\n");
3374 handle_query_ip_offload_rsp(adapter);
3375 break;
3376 case MULTICAST_CTRL_RSP:
3377 netdev_dbg(netdev, "Got multicast control Response\n");
3378 break;
3379 case CONTROL_IP_OFFLOAD_RSP:
3380 netdev_dbg(netdev, "Got Control IP offload Response\n");
3381 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3382 sizeof(adapter->ip_offload_ctrl),
3383 DMA_TO_DEVICE);
3384 /* We're done with the queries, perform the login */
3385 send_login(adapter);
3386 break;
3387 case REQUEST_RAS_COMP_NUM_RSP:
3388 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3389 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3390 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3391 break;
3392 }
3393 adapter->ras_comp_num =
3394 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3395 handle_request_ras_comp_num_rsp(crq, adapter);
3396 break;
3397 case REQUEST_RAS_COMPS_RSP:
3398 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3399 handle_request_ras_comps_rsp(crq, adapter);
3400 break;
3401 case CONTROL_RAS_RSP:
3402 netdev_dbg(netdev, "Got Control RAS Response\n");
3403 handle_control_ras_rsp(crq, adapter);
3404 break;
3405 case COLLECT_FW_TRACE_RSP:
3406 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3407 complete(&adapter->fw_done);
3408 break;
3409 default:
3410 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3411 gen_crq->cmd);
3412 }
3413}
3414
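/* CRQ interrupt handler: drain all valid messages, re-enable the VIO
 * interrupt, then poll once more to close the window where a message could
 * arrive between the last dequeue and the re-enable.
 */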
3415static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3416{
3417 struct ibmvnic_adapter *adapter = instance;
3418 struct ibmvnic_crq_queue *queue = &adapter->crq;
3419 struct vio_dev *vdev = adapter->vdev;
3420 union ibmvnic_crq *crq;
3421 unsigned long flags;
3422 bool done = false;
3423
3424 spin_lock_irqsave(&queue->lock, flags);
3425 vio_disable_interrupts(vdev);
3426 while (!done) {
3427 /* Pull all the valid messages off the CRQ */
3428 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3429 ibmvnic_handle_crq(crq, adapter);
3430 crq->generic.first = 0;
3431 }
3432 vio_enable_interrupts(vdev);
3433 crq = ibmvnic_next_crq(adapter);
3434 if (crq) {
3435 vio_disable_interrupts(vdev);
3436 ibmvnic_handle_crq(crq, adapter);
3437 crq->generic.first = 0;
3438 } else {
3439 done = true;
3440 }
3441 }
3442 spin_unlock_irqrestore(&queue->lock, flags);
3443 return IRQ_HANDLED;
3444}
3445
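/* Re-enable the CRQ with the H_ENABLE_CRQ hcall, retrying for as long
 * as the hypervisor reports the operation as busy or in progress.
 */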
3446static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3447{
3448 struct vio_dev *vdev = adapter->vdev;
3449 int rc;
3450
3451 do {
3452 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3453 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3454
3455 if (rc)
3456 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3457
3458 return rc;
3459}
3460
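/* Reset the CRQ: free it on the hypervisor side, clear the message
 * page, and register it again.  H_CLOSED from H_REG_CRQ means the
 * partner adapter is not ready yet and is reported only as a warning.
 */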
3461static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3462{
3463 struct ibmvnic_crq_queue *crq = &adapter->crq;
3464 struct device *dev = &adapter->vdev->dev;
3465 struct vio_dev *vdev = adapter->vdev;
3466 int rc;
3467
3468 /* Close the CRQ */
3469 do {
3470 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3471 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3472
3473 /* Clean out the queue */
3474 memset(crq->msgs, 0, PAGE_SIZE);
3475 crq->cur = 0;
3476
3477 /* And re-open it again */
3478 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3479 crq->msg_token, PAGE_SIZE);
3480
3481 if (rc == H_CLOSED)
3482 /* Adapter is good, but other end is not ready */
3483 dev_warn(dev, "Partner adapter not ready\n");
3484 else if (rc != 0)
3485 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3486
3487 return rc;
3488}
3489
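/* Tear down the CRQ: release the irq, free the queue on the hypervisor
 * side, unmap the DMA buffer, and free the message page.
 */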
3490static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3491{
3492 struct ibmvnic_crq_queue *crq = &adapter->crq;
3493 struct vio_dev *vdev = adapter->vdev;
3494 long rc;
3495
3496 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3497 free_irq(vdev->irq, adapter);
3498 do {
3499 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3500 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3501
3502 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3503 DMA_BIDIRECTIONAL);
3504 free_page((unsigned long)crq->msgs);
3505}
3506
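/* Allocate and register the CRQ: one zeroed page of CRQ messages is
 * DMA-mapped and registered with H_REG_CRQ (falling back to a CRQ reset
 * if the resource is still held, e.g. across a kexec), then the device
 * irq is requested and VIO interrupts are enabled.
 */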
3507static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3508{
3509 struct ibmvnic_crq_queue *crq = &adapter->crq;
3510 struct device *dev = &adapter->vdev->dev;
3511 struct vio_dev *vdev = adapter->vdev;
3512 int rc, retrc = -ENOMEM;
3513
3514 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3515 /* Should we allocate more than one page? */
3516
3517 if (!crq->msgs)
3518 return -ENOMEM;
3519
3520 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3521 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3522 DMA_BIDIRECTIONAL);
3523 if (dma_mapping_error(dev, crq->msg_token))
3524 goto map_failed;
3525
3526 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3527 crq->msg_token, PAGE_SIZE);
3528
3529 if (rc == H_RESOURCE)
3530 /* maybe kexecing and resource is busy. try a reset */
3531 rc = ibmvnic_reset_crq(adapter);
3532 retrc = rc;
3533
3534 if (rc == H_CLOSED) {
3535 dev_warn(dev, "Partner adapter not ready\n");
3536 } else if (rc) {
3537 dev_warn(dev, "Error %d opening adapter\n", rc);
3538 goto reg_crq_failed;
3539 }
3540
3541 retrc = 0;
3542
3543 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3544 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3545 adapter);
3546 if (rc) {
3547 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3548 vdev->irq, rc);
3549 goto req_irq_failed;
3550 }
3551
3552 rc = vio_enable_interrupts(vdev);
3553 if (rc) {
3554 dev_err(dev, "Error %d enabling interrupts\n", rc);
3555 goto req_irq_failed;
3556 }
3557
3558 crq->cur = 0;
3559 spin_lock_init(&crq->lock);
3560
3561 return retrc;
3562
3563req_irq_failed:
3564 do {
3565 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3566 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3567reg_crq_failed:
3568 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3569map_failed:
3570 free_page((unsigned long)crq->msgs);
3571 return retrc;
3572}
3573
3574/* debugfs for dump */
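/* seq_file show callback for the debugfs "dump" entry: send a
 * REQUEST_DUMP_SIZE CRQ, wait for the fw_done completion (signalled in
 * the REQUEST_DUMP_RSP case of ibmvnic_handle_crq() above), then write
 * the collected dump data to the seq_file and release it.
 */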
3575static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3576{
3577 struct net_device *netdev = seq->private;
3578 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3579 struct device *dev = &adapter->vdev->dev;
3580 union ibmvnic_crq crq;
3581
3582 memset(&crq, 0, sizeof(crq));
3583 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3584 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3585
3586 init_completion(&adapter->fw_done);
db5d0b59 3587 ibmvnic_send_crq(adapter, &crq);
3588 wait_for_completion(&adapter->fw_done);
3589
3590 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3591
3592 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3593 DMA_BIDIRECTIONAL);
3594
3595 kfree(adapter->dump_data);
3596
3597 return 0;
3598}
3599
3600static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3601{
3602 return single_open(file, ibmvnic_dump_show, inode->i_private);
3603}
3604
3605static const struct file_operations ibmvnic_dump_ops = {
3606 .owner = THIS_MODULE,
3607 .open = ibmvnic_dump_open,
3608 .read = seq_read,
3609 .llseek = seq_lseek,
3610 .release = single_release,
3611};
3612
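/* Worker for the vnic_crq_init work item: complete passive
 * (server-driven) initialization after a CRQ init response.  On a
 * failover the sub-CRQs are released and, if the interface was running,
 * it is closed and later re-opened.  Version exchange and capability
 * negotiation are then redone before the netdev is registered (or the
 * carrier restored).
 */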
3613static void handle_crq_init_rsp(struct work_struct *work)
3614{
3615 struct ibmvnic_adapter *adapter = container_of(work,
3616 struct ibmvnic_adapter,
3617 vnic_crq_init);
3618 struct device *dev = &adapter->vdev->dev;
3619 struct net_device *netdev = adapter->netdev;
3620 unsigned long timeout = msecs_to_jiffies(30000);
dfad09a6 3621 bool restart = false;
3622 int rc;
3623
3624 if (adapter->failover) {
3625 release_sub_crqs(adapter);
3626 if (netif_running(netdev)) {
3627 netif_tx_disable(netdev);
3628 ibmvnic_close(netdev);
3629 restart = true;
3630 }
3631 }
3632
65dc6891 3633 reinit_completion(&adapter->init_done);
db5d0b59 3634 send_version_xchg(adapter);
3635 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3636 dev_err(dev, "Passive init timeout\n");
3637 goto task_failed;
3638 }
3639
3640 do {
3641 if (adapter->renegotiate) {
3642 adapter->renegotiate = false;
3643 release_sub_crqs_no_irqs(adapter);
3644
3645 reinit_completion(&adapter->init_done);
db5d0b59 3646 send_cap_queries(adapter);
3647 if (!wait_for_completion_timeout(&adapter->init_done,
3648 timeout)) {
3649 dev_err(dev, "Passive init timeout\n");
3650 goto task_failed;
3651 }
3652 }
3653 } while (adapter->renegotiate);
3654 rc = init_sub_crq_irqs(adapter);
3655
3656 if (rc)
3657 goto task_failed;
3658
3659 netdev->real_num_tx_queues = adapter->req_tx_queues;
f39f0d1e 3660 netdev->mtu = adapter->req_mtu - ETH_HLEN;
65dc6891 3661
3662 if (adapter->failover) {
3663 adapter->failover = false;
3664 if (restart) {
3665 rc = ibmvnic_open(netdev);
3666 if (rc)
3667 goto restart_failed;
3668 }
3669 netif_carrier_on(netdev);
3670 return;
3671 }
3672
3673 rc = register_netdev(netdev);
3674 if (rc) {
3675 dev_err(dev,
3676 "failed to register netdev rc=%d\n", rc);
3677 goto register_failed;
3678 }
3679 dev_info(dev, "ibmvnic registered\n");
3680
3681 return;
3682
3683restart_failed:
3684 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3685register_failed:
3686 release_sub_crqs(adapter);
3687task_failed:
3688 dev_err(dev, "Passive initialization was not successful\n");
3689}
3690
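/* Probe routine for the "IBM,vnic" VIO device: look up the
 * VETH_MAC_ADDR attribute, allocate the netdev and adapter, bring up
 * the CRQ, map the statistics buffer, create the debugfs entries, and
 * drive the initial CRQ handshake (renegotiating capabilities if
 * requested) before registering the net device.
 */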
3691static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3692{
ea22d51a 3693 unsigned long timeout = msecs_to_jiffies(30000);
3694 struct ibmvnic_adapter *adapter;
3695 struct net_device *netdev;
3696 unsigned char *mac_addr_p;
3697 struct dentry *ent;
e1fac0ad 3698 char buf[17]; /* debugfs name buf */
3699 int rc;
3700
3701 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3702 dev->unit_address);
3703
3704 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3705 VETH_MAC_ADDR, NULL);
3706 if (!mac_addr_p) {
3707 dev_err(&dev->dev,
3708 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3709 __FILE__, __LINE__);
3710 return 0;
3711 }
3712
3713 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3714 IBMVNIC_MAX_TX_QUEUES);
3715 if (!netdev)
3716 return -ENOMEM;
3717
3718 adapter = netdev_priv(netdev);
3719 dev_set_drvdata(&dev->dev, netdev);
3720 adapter->vdev = dev;
3721 adapter->netdev = netdev;
dfad09a6 3722 adapter->failover = false;
3723
3724 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3725 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3726 netdev->irq = dev->irq;
3727 netdev->netdev_ops = &ibmvnic_netdev_ops;
3728 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3729 SET_NETDEV_DEV(netdev, &dev->dev);
3730
65dc6891 3731 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
9888d7b0 3732 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
65dc6891 3733
3734 spin_lock_init(&adapter->stats_lock);
3735
3736 rc = ibmvnic_init_crq_queue(adapter);
3737 if (rc) {
3738 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3739 goto free_netdev;
3740 }
3741
3742 INIT_LIST_HEAD(&adapter->errors);
3743 INIT_LIST_HEAD(&adapter->inflight);
3744 spin_lock_init(&adapter->error_list_lock);
3745 spin_lock_init(&adapter->inflight_lock);
3746
3747 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3748 sizeof(struct ibmvnic_statistics),
3749 DMA_FROM_DEVICE);
3750 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3751 if (!firmware_has_feature(FW_FEATURE_CMO))
3752 dev_err(&dev->dev, "Couldn't map stats buffer\n");
0e87203a 3753 rc = -ENOMEM;
3754 goto free_crq;
3755 }
3756
3757 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3758 ent = debugfs_create_dir(buf, NULL);
3759 if (!ent || IS_ERR(ent)) {
3760 dev_info(&dev->dev, "debugfs create directory failed\n");
3761 adapter->debugfs_dir = NULL;
3762 } else {
3763 adapter->debugfs_dir = ent;
3764 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3765 netdev, &ibmvnic_dump_ops);
3766 if (!ent || IS_ERR(ent)) {
3767 dev_info(&dev->dev,
3768 "debugfs create dump file failed\n");
3769 adapter->debugfs_dump = NULL;
3770 } else {
3771 adapter->debugfs_dump = ent;
3772 }
3773 }
3774
3775 init_completion(&adapter->init_done);
db5d0b59 3776 ibmvnic_send_crq_init(adapter);
3777 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3778 return 0;
032c5e82 3779
498cd8e4 3780 do {
498cd8e4 3781 if (adapter->renegotiate) {
3782 adapter->renegotiate = false;
3783 release_sub_crqs_no_irqs(adapter);
3784
3785 reinit_completion(&adapter->init_done);
db5d0b59 3786 send_cap_queries(adapter);
3787 if (!wait_for_completion_timeout(&adapter->init_done,
3788 timeout))
3789 return 0;
3790 }
3791 } while (adapter->renegotiate);
032c5e82 3792
3793 rc = init_sub_crq_irqs(adapter);
3794 if (rc) {
3795 dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
3796 goto free_debugfs;
3797 }
3798
3799 netdev->real_num_tx_queues = adapter->req_tx_queues;
f39f0d1e 3800 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3801
3802 rc = register_netdev(netdev);
3803 if (rc) {
3804 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
ea22d51a 3805 goto free_sub_crqs;
3806 }
3807 dev_info(&dev->dev, "ibmvnic registered\n");
3808
3809 return 0;
3810
3811free_sub_crqs:
3812 release_sub_crqs(adapter);
3813free_debugfs:
3814 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3815 debugfs_remove_recursive(adapter->debugfs_dir);
3816free_crq:
3817 ibmvnic_release_crq_queue(adapter);
3818free_netdev:
3819 free_netdev(netdev);
3820 return rc;
3821}
3822
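/* Remove routine: unregister the net device, then release the
 * sub-CRQs, the CRQ, the debugfs entries, the statistics mapping, and
 * the RAS buffers set up at probe time.
 */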
3823static int ibmvnic_remove(struct vio_dev *dev)
3824{
3825 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3826 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3827
3828 unregister_netdev(netdev);
3829
3830 release_sub_crqs(adapter);
3831
3832 ibmvnic_release_crq_queue(adapter);
3833
3834 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3835 debugfs_remove_recursive(adapter->debugfs_dir);
3836
3837 dma_unmap_single(&dev->dev, adapter->stats_token,
3838 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3839
3840 if (adapter->ras_comps)
3841 dma_free_coherent(&dev->dev,
3842 adapter->ras_comp_num *
3843 sizeof(struct ibmvnic_fw_component),
3844 adapter->ras_comps, adapter->ras_comps_tok);
3845
3846 kfree(adapter->ras_comp_int);
3847
3848 free_netdev(netdev);
3849 dev_set_drvdata(&dev->dev, NULL);
3850
3851 return 0;
3852}
3853
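/* Report the IO entitlement (DMA window space) this device would like:
 * the CRQ page, the bounce buffer, the statistics buffer, the sub-CRQ
 * message queues, and the receive buffer pools.  Before the netdev has
 * been probed, a default entitlement is returned.
 */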
3854static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3855{
3856 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3857 struct ibmvnic_adapter *adapter;
3858 struct iommu_table *tbl;
3859 unsigned long ret = 0;
3860 int i;
3861
3862 tbl = get_iommu_table_base(&vdev->dev);
3863
 3864	/* netdev inits at probe time along with the structures we need below */
3865 if (!netdev)
3866 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3867
3868 adapter = netdev_priv(netdev);
3869
3870 ret += PAGE_SIZE; /* the crq message queue */
3871 ret += adapter->bounce_buffer_size;
3872 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3873
3874 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3875 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3876
3877 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3878 i++)
3879 ret += adapter->rx_pool[i].size *
3880 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3881
3882 return ret;
3883}
3884
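/* PM resume callback: poke the receive sub-CRQ interrupt handlers in
 * case an interrupt was lost while the partition was suspended.
 */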
3885static int ibmvnic_resume(struct device *dev)
3886{
3887 struct net_device *netdev = dev_get_drvdata(dev);
3888 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3889 int i;
3890
3891 /* kick the interrupt handlers just in case we lost an interrupt */
3892 for (i = 0; i < adapter->req_rx_queues; i++)
3893 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3894 adapter->rx_scrq[i]);
3895
3896 return 0;
3897}
3898
3899static struct vio_device_id ibmvnic_device_table[] = {
3900 {"network", "IBM,vnic"},
3901 {"", "" }
3902};
3903MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3904
3905static const struct dev_pm_ops ibmvnic_pm_ops = {
3906 .resume = ibmvnic_resume
3907};
3908
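/* VIO driver glue tying the probe/remove/PM callbacks above to the
 * "IBM,vnic" device table.
 */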
3909static struct vio_driver ibmvnic_driver = {
3910 .id_table = ibmvnic_device_table,
3911 .probe = ibmvnic_probe,
3912 .remove = ibmvnic_remove,
3913 .get_desired_dma = ibmvnic_get_desired_dma,
3914 .name = ibmvnic_driver_name,
3915 .pm = &ibmvnic_pm_ops,
3916};
3917
3918/* module functions */
3919static int __init ibmvnic_module_init(void)
3920{
3921 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3922 IBMVNIC_DRIVER_VERSION);
3923
3924 return vio_register_driver(&ibmvnic_driver);
3925}
3926
3927static void __exit ibmvnic_module_exit(void)
3928{
3929 vio_unregister_driver(&ibmvnic_driver);
3930}
3931
3932module_init(ibmvnic_module_init);
3933module_exit(ibmvnic_module_exit);