/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
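
/*
 * Illustrative note (added for clarity, not in the original driver): the
 * two macros above combine to read a counter from the adapter by byte
 * offset, e.g.
 *
 *	u64 n = IBMVETH_GET_STAT(adapter, IBMVETH_STAT_OFF(rx_no_buffer));
 *
 * which assumes each stat named in the table below is a u64 field of
 * struct ibmveth_adapter.
 */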

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}
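
/*
 * Note (added for clarity, not in the original source): the receive queue
 * is a ring the driver consumes in order. Entries carry a toggle bit that
 * alternates on every pass around the ring, and the driver flips its own
 * rx_queue.toggle each time its index wraps (see
 * ibmveth_rxq_recycle_buffer()/ibmveth_rxq_harvest_buffer()), so an entry
 * is pending exactly when the two toggle values match.
 */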

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
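
/*
 * Note (added for clarity, not in the original source): free_map is a
 * ring of free slot indices. The replenish path pulls an index at
 * consumer_index and marks that map entry IBM_VETH_INVALID_MAP; when the
 * hypervisor hands a buffer back, ibmveth_remove_buffer_from_pool()
 * returns the index at producer_index. The atomic 'available' count
 * tracks how many buffers are currently posted to the hypervisor.
 */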

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

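		/*
		 * Note (added for clarity, not in the original source):
		 * the correlator written into the buffer encodes the pool
		 * number in the upper 32 bits and the slot index in the
		 * lower 32 bits; the receive path splits it back apart in
		 * ibmveth_remove_buffer_from_pool().
		 */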
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;
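
	/*
	 * Note (added for clarity, not in the original source): on
	 * big-endian Power the 6-byte MAC lands in the top six bytes of
	 * the 64-bit word after the memcpy above, so shifting right by 16
	 * bits right-justifies it the way the hypervisor call expects.
	 */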

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}
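
/*
 * Note (added for clarity, not in the original source): in practice this
 * means that, e.g., "ethtool -K <dev> rx off" also clears the tx checksum
 * offload bits reported by "ethtool -k <dev>", because the firmware only
 * exposes a single checksum-offload switch.
 */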

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
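
/*
 * Note (added for clarity, not in the original source): page_offset()
 * masks off everything above the low 12 bits, i.e. it yields the offset
 * of a virtual address within a hardcoded 4 KiB page.
 */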

static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
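	/*
	 * Note (added for clarity, not in the original source): with the
	 * default tx_copybreak of 128, a small frame such as a 64-byte TCP
	 * ACK is memcpy'd into the pre-mapped bounce buffer and sent from
	 * there, skipping the dma_map_single()/dma_unmap_single() pair
	 * used for larger packets below.
	 */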
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb); /* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}
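
/*
 * Note (added for clarity, not in the original source): the entitlement
 * computed above is roughly "everything this driver will ever map at
 * once": the two 4 KiB list pages, an MTU-sized mapping (the bounce
 * buffer), every buffer of every active rx pool, and the receive queue
 * itself, each rounded up to the IOMMU page size.
 */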

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};
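
/*
 * Note (added for clarity, not in the original source): each ATTR()
 * invocation below expands to a plain struct attribute; e.g.
 * ATTR(active, 0644) defines veth_active_attr with .name = "active",
 * which (the path is an illustration) surfaces in sysfs as something
 * like /sys/devices/vio/<unit>/pool<N>/active with mode 0644.
 */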

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show	= veth_pool_show,
	.store	= veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release	= NULL,
	.sysfs_ops	= &veth_pool_ops,
	.default_attrs	= veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);