/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, continuous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
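
/* Illustrative sketch of the "long term mapping" scheme described above,
 * not driver code: buffer N of a pool lives at a fixed offset inside one
 * large DMA-coherent region, so no per-packet map/unmap is needed:
 *
 *	offset   = index * pool->buff_size;
 *	dst      = pool->long_term_buff.buff + offset;	(CPU address)
 *	dma_addr = pool->long_term_buff.addr + offset;	(bus address)
 *
 * replenish_rx_pool() and ibmvnic_xmit() below both follow this pattern.
 */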
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
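
/* Usage sketch: the two macros compose so a statistic can be read with a
 * single offset into the adapter, which is how ibmvnic_get_ethtool_stats()
 * consumes the table below, e.g.:
 *
 *	u64 v = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 */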
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
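
/* plpar_hcall() fills retbuf with up to PLPAR_HCALL_BUFSIZE output words;
 * for H_REG_SUB_CRQ the first two are the new queue number and its
 * hardware interrupt source, returned to the caller through *number and
 * *irq (init_sub_crq_queue() passes &scrq->crq_num and &scrq->hw_irq).
 */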
/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	/* Initialize the completion before the request goes out so the
	 * map response cannot race with the waiter below.
	 */
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
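
/* Worked example of the 24-bit length encoding above, assuming 4K buffers
 * on a little endian host and the size occupying the high three bytes of
 * the 32-bit field:
 *
 *	pool->buff_size << 8  ==  0x1000 << 8  ==  0x00100000
 *	cpu_to_be32(0x00100000) is stored as bytes 00 10 00 00,
 *
 * so the first three bytes still encode 0x001000 (4096) and no byte of
 * the size is truncated away.
 */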
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}

	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	/* unwind must disable what was enabled above */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
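
/* Example of the chunking above: a 54 byte header block (14 byte L2 +
 * 20 byte L3 + 20 byte L4) becomes one header descriptor carrying the
 * first 24 bytes plus two extension descriptors carrying 29 bytes and
 * 1 byte respectively.
 */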
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;
restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
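
/* The tail of ibmvnic_poll() follows the standard NAPI contract: only
 * when fewer frames than the budget were processed may the poller
 * re-enable the queue interrupt and complete, and the subsequent
 * pending_scrq()/napi_reschedule() recheck closes the race with a frame
 * that arrived after the interrupt was re-enabled.
 */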
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}
static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written; initialize the completion before
	 * sending so the response cannot race the waiter.
	 */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings		= ibmvnic_get_settings,
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
};
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
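
/* Ring sizing above: the sCRQ message area is four pages and each entry
 * is a 32 byte union sub_crq, so assuming 4K pages scrq->size evaluates
 * to 4 * 4096 / 32 = 512 entries.
 */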
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->max_tx_queues;
		adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
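
/* pending_scrq() is effectively a peek at the entry's valid bit: the
 * server sets IBMVNIC_CRQ_CMD_RSP in the first byte when it hands an
 * entry over, and the driver zeroes that byte (e.g. next->rx_comp.first
 * = 0 in ibmvnic_poll()) to give the slot back to the ring.
 */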
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
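
/* All three senders above place a full memory barrier (mb()) between
 * building the request and the hcall so the hypervisor cannot observe a
 * partially written descriptor, and byte-swap the payload words with
 * cpu_to_be64() since the CRQ format is big endian.
 */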
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
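
/* Layout of the login buffer built above; the offsets shown are the
 * values stored in off_txcomp_subcrqs and off_rxcomp_subcrqs:
 *
 *	0:	struct ibmvnic_login_buffer
 *	+hdr:	req_tx_queues  x  __be64 tx sub-CRQ numbers  (tx_list_p)
 *	+8*tx:	req_rx_queues  x  __be64 rx sub-CRQ numbers  (rx_list_p)
 */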
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}

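/* The server's QUERY_IP_OFFLOAD response describes which checksum and
 * large-send offloads the backing device supports.  Translate that into
 * netdev feature flags, echo our selection back in a CONTROL_IP_OFFLOAD
 * request, and leave the control buffer DMA-mapped until the response
 * comes back.
 */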
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

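/* A REQUEST_ERROR_RSP carries the detail bytes for an error we asked about
 * in handle_error_indication().  Match it to the buffer queued on
 * adapter->errors by error_id, dump the payload to the log, and release
 * the buffer.
 */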
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}

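/* The server signals firmware errors asynchronously.  Allocate and map a
 * buffer for the detailed error data, remember it on adapter->errors, and
 * send a REQUEST_ERROR_INFO CRQ so the server can fill the buffer in; the
 * detail arrives later as a REQUEST_ERROR_RSP.  GFP_ATOMIC is used because
 * this runs from the CRQ interrupt path.
 */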
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
		    "FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	/* remember the request we sent so it can be cleaned up later */
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

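/* Completion of the LOGIN exchange.  A non-zero return code means the
 * server couldn't allocate the number of queues we asked for, so mark the
 * adapter for renegotiation and let the init path retry with smaller
 * values from the capability responses.
 */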
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

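/* One QUERY_CAPABILITY response arrives per query sent by
 * send_cap_queries(); running_cap_queries counts how many are still
 * outstanding.  Record each reported value and, once the last response is
 * in, move on to allocating the sub-CRQs.
 */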
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

	/* We're done querying the capabilities, initialize sub-crqs */
out:
	if (atomic_read(&adapter->running_cap_queries) == 0)
		init_sub_crqs(adapter, 0);
}

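/* Response to a CONTROL_RAS request issued from one of the debugfs files
 * below.  Locate the firmware component by correlator and mirror the
 * acknowledged setting into our cached copy of the component table.
 */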
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}

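/* debugfs read handler for the per-component "trace" file.  The trace data
 * lives on the server, so each read allocates a coherent buffer, sends
 * COLLECT_FW_TRACE and sleeps on fw_done until the CRQ response handler
 * signals that the buffer has been filled.
 */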
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long copied;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	copied = copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	if (copied)
		return -EFAULT;

	*ppos += len;
	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_read,
};

static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
			   loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= paused_read,
	.write		= paused_write,
};

static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= tracing_read,
	.write		= tracing_write,
};

static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= error_level_read,
	.write		= error_level_write,
};

static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};

static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it: on big endian
	 * ppc64 these are the low three bytes of the unsigned long
	 */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};

static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}

static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}

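/* Drop every command still waiting for a response, releasing whatever
 * buffers were attached to it.  Called when the CRQ connection is lost
 * (migration or adapter failure) and no responses will ever arrive.
 */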
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}

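/* Central dispatcher for incoming CRQ messages: initialization handshakes,
 * transport events such as partition migration and failover, and the
 * command responses that drive the setup sequence described at the top of
 * this file.
 */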
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

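/* Allocate the page-sized CRQ message queue, register it with the
 * hypervisor via H_REG_CRQ and hook up the CRQ interrupt.  H_RESOURCE
 * usually means a previous kernel (e.g. a kexec source) left the CRQ
 * registered, so a reset is attempted before giving up.
 */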
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}

/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

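/* Worker scheduled when the VNIC server (re)initializes the connection,
 * e.g. after a failover.  Re-runs the version exchange and capability
 * negotiation, repeating the capability step for as long as login keeps
 * asking us to renegotiate, then restarts or registers the net device.
 */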
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	send_version_xchg(adapter);
	reinit_completion(&adapter->init_done);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);
			send_cap_queries(adapter);

			reinit_completion(&adapter->init_done);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
	} while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);

	if (rc)
		goto task_failed;

	netdev->real_num_tx_queues = adapter->req_tx_queues;
	netdev->min_mtu = adapter->min_mtu;
	netdev->max_mtu = adapter->max_mtu;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO,
					  adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
	ibmvnic_send_crq_init(adapter);

	init_completion(&adapter->init_done);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);
			send_cap_queries(adapter);

			reinit_completion(&adapter->init_done);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);