/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/*  This module contains the implementation of a virtual ethernet device  */
/*  for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/*  option of the RS/6000 Platform Architecture to interface with virtual */
/*  ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/*  Messages are passed between the VNIC driver and the VNIC server using */
/*  Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/*  issue and receive commands that initiate communication with the       */
/*  server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/*  CRQs, but are used by the driver to notify the server that a packet   */
/*  is ready for transmission or that a buffer has been added to receive  */
/*  a packet. Subsequently, sCRQs are used by the server to notify the    */
/*  driver that a packet transmission has been completed or that a packet */
/*  has been received and placed in a waiting buffer.                     */
/*                                                                        */
/*  In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/*  which skbs are DMA mapped and immediately unmapped when the transmit  */
/*  or receive has been completed, the VNIC driver is required to use     */
/*  "long term mapping". This entails that large, contiguous DMA-mapped   */
/*  buffers are allocated on driver initialization and then continuously  */
/*  reused to pass skbs to and from the VNIC server.                      */
/*                                                                        */
/**************************************************************************/
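
/* Illustrative sketch, not part of the original source: in terms of the
 * driver's own helpers defined below, the "long term mapping" scheme
 * described above amounts to the following lifecycle (the buffer count
 * and size here are made-up values for illustration):
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, 512 * 2048); // once, at init
 *	// ... copy each skb into/out of ltb.buff at a per-buffer offset,
 *	// reusing the same DMA mapping for every frame ...
 *	free_long_term_buff(adapter, &ltb);              // once, at teardown
 */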

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
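
/* Illustrative note, not part of the original source: the two macros
 * above compose so that a statistic is read out of adapter->stats by
 * byte offset, e.g.
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * is equivalent to reading adapter->stats.rx_packets.
 */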

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static void reset_long_term_buff(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
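		/* Worked example, not part of the original source: with a
		 * 4096-byte buffer on little endian, 0x1000 << 8 = 0x00100000
		 * and cpu_to_be32() stores the bytes 00 10 00 00, so the
		 * first three bytes -- the 24-bit len field -- carry 0x001000
		 * (4096) and nothing is truncated.
		 */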
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed. Firmware guarantees that a signal will
		 * be sent to the driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		reset_long_term_buff(adapter, &rx_pool->long_term_buff);

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int tx_scrqs;
	int i, j;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

		reset_long_term_buff(adapter, &tx_pool->long_term_buff);

		memset(tx_pool->tx_buff, 0,
		       adapter->req_tx_entries_per_subcrq *
		       sizeof(struct ibmvnic_tx_buff));

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	adapter->napi_enabled = false;
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			netif_napi_del(&adapter->napi[i]);
	}
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);
	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool)
			continue;

		for (j = 0; j < tx_entries; j++) {
			if (tx_pool->tx_buff[j].skb) {
				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
				tx_pool->tx_buff[j].skb = NULL;
			}
		}
	}
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;
	netif_tx_stop_all_queues(netdev);
	ibmvnic_napi_disable(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq)
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			int retries = 10;

			while (pending_scrq(adapter, adapter->rx_scrq[i])) {
				retries--;
				mdelay(100);

				if (retries == 0)
					break;
			}

			if (adapter->rx_scrq[i]->irq)
				disable_irq(adapter->rx_scrq[i]->irq);
		}
	}

	clean_tx_pools(adapter);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to fill in
 * @hdr_data - buffer that the built headers are copied into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total length of the buffer, to be used to
 * build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
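
/* Illustrative note, not part of the original source: the hdr_field bits
 * tested above select which headers are copied -- bit 6 for L2, bit 5 for
 * L3, bit 4 for L4 -- so a hdr_field of 0x70, for example, copies all
 * three headers into hdr_data back to back.
 */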

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
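
/* Worked example, not part of the original source: the first descriptor
 * above carries up to 24 bytes of header data and each extension
 * descriptor up to 29. A 54-byte Ethernet + IPv4 + TCP header
 * (14 + 20 + 20) therefore needs one header descriptor plus two
 * extensions, carrying 24 + 29 + 1 bytes respectively.
 */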

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and header data
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED) {
			/* Disable TX and report carrier off if queue is closed.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		rc = ibmvnic_init(adapter);
		if (rc)
			return 0;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		rc = reset_tx_pools(adapter);
		if (rc)
			return rc;

		rc = reset_rx_pools(adapter);
		if (rc)
			return rc;

		if (reset_state == VNIC_CLOSED)
			return 0;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	netdev_notify_peers(netdev);
	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (rc) {
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_err(netdev, "Matching reset found, skipping\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	schedule_work(&adapter->ibmvnic_reset);
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
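
/* Illustrative note, not part of the original source: free_map is a ring
 * of buffer indices shared by the two paths -- replenish_rx_pool()
 * consumes an index at next_free when posting a buffer to firmware, and
 * the completion path above returns it at next_alloc, with
 * pool->available counting the buffers currently posted.
 */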
1491 | ||
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

	if (adapter->resetting)
		return 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

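/* Tear down a sub-CRQ's irq, sanitize its message buffer, and
 * re-register the queue with the hypervisor; used by the reset path
 * via reset_sub_crq_queues() below.
 */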
static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	/* Clear the entire buffer registered with the hypervisor; it is
	 * 4 * PAGE_SIZE, so zeroing only 2 * PAGE_SIZE would leave stale
	 * entries in the second half.
	 */
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	return rc;
}

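/* Free one sub-CRQ: retry H_FREE_SUB_CRQ while the hypervisor reports
 * busy, then unmap and free the 4-page message buffer.
 */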
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

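/* Allocate and register one sub-CRQ: four zeroed pages are DMA mapped
 * and handed to the hypervisor via h_reg_sub_crq, which returns the
 * queue number and the hardware irq used for completions.
 */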
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

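/* Release all tx and rx sub-CRQs, freeing any irqs that are still
 * registered before handing each queue back to the hypervisor.
 */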
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

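/* Sub-CRQ interrupts are masked and unmasked through the H_VIOCTL
 * hypervisor call rather than through the usual irq chip operations.
 */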
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

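/* Process tx completions for one sub-CRQ: unmap any indirect
 * descriptor buffer, free completed skbs, return tx_buff slots to the
 * pool's free map, and wake the subqueue once it has drained below
 * half of req_tx_entries_per_subcrq.
 */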
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

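/* Map each sub-CRQ's hypervisor irq into the Linux irq space and
 * register the tx/rx interrupt handlers. On failure, unwind only the
 * irqs that were actually registered.
 */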
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			/* unwind only the tx irqs registered so far; no rx
			 * irqs exist yet at this point
			 */
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

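/* Allocate the requested tx and rx sub-CRQs. If fewer queues could be
 * registered than requested, reduce req_tx_queues/req_rx_queues toward
 * their minimums and distribute the shortfall between tx and rx.
 */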
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

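/* Send REQUEST_CAPABILITY CRQs for the negotiated settings. On the
 * first pass (!retry) the requested values are derived from the
 * queried min/max capabilities; on a retry the server-adjusted values
 * already stored in the adapter are resent unchanged.
 */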
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

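/* A sub-CRQ entry is pending when firmware has set the CMD_RSP bit in
 * its first byte. While the adapter is closing, pending_scrq() also
 * reports true so that completion handlers keep draining the queues.
 */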
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
	    adapter->state == VNIC_CLOSING)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

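/* Build and send the LOGIN buffer: a header followed by the tx and rx
 * sub-CRQ numbers, plus a DMA-mapped response buffer sized for the
 * queue handles and tx descriptor versions the server will return.
 */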
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

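/* The server has filled in the IP offload query buffer. Dump it for
 * debugging, program the offload control buffer with the checksum
 * features we will use, and derive the netdev feature flags from it.
 */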
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	/* A non-zero rc means the server could not return the detailed
	 * error data we asked for.
	 */
	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

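/* Ask the server for the detailed data behind an error indication.
 * A buffer of detail_error_sz bytes is DMA mapped, queued on the
 * adapter's error list, and filled in by the REQUEST_ERROR_INFO
 * response handled above.
 */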
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

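/* Handle the server's response to a REQUEST_CAPABILITY CRQ. On
 * PARTIALSUCCESS the server supplies the value it can support; adopt
 * it and resend the capability requests. Once all outstanding
 * capability CRQs have completed, move on to the IP offload query.
 */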
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			be16_to_cpu(crq->request_capability.capability));
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
			&adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
			cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entries_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}

	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;
	return 0;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);