/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

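/* statistics exported via ethtool -S; each entry names a u64 counter
 * located at the given offset within struct ibmveth_adapter
 */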
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

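/* write every cache line of a buffer back to memory (dcbfl is the PowerPC
 * data cache block flush instruction); only used when the rx_flush module
 * parameter is set
 */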
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

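		/* the correlator stored in the buffer encodes the pool number
		 * in the upper 32 bits and the buffer index in the lower 32
		 * bits, so the buffer can be located again at receive time
		 */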
251 correlator = ((u64)pool->index << 32) | index;
f148f61d 252 *(u64 *)skb->data = correlator;
1da177e4 253
79ef4a4d 254 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
d7fbeba6 255 desc.fields.address = dma_addr;
1da177e4 256
0c26b677
SL
257 if (rx_flush) {
258 unsigned int len = min(pool->buff_size,
259 adapter->netdev->mtu +
260 IBMVETH_BUFF_OH);
261 ibmveth_flush_buffer(skb->data, len);
262 }
f148f61d
SL
263 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
264 desc.desc);
d7fbeba6 265
f148f61d 266 if (lpar_rc != H_SUCCESS) {
1096d63d 267 goto failure;
f148f61d 268 } else {
1da177e4
LT
269 buffers_added++;
270 adapter->replenish_add_buff_success++;
271 }
272 }
d7fbeba6 273
1096d63d
RJ
274 mb();
275 atomic_add(buffers_added, &(pool->available));
276 return;
277
278failure:
279 pool->free_map[free_index] = index;
280 pool->skbuff[index] = NULL;
281 if (pool->consumer_index == 0)
282 pool->consumer_index = pool->size - 1;
283 else
284 pool->consumer_index--;
c713e7cb 285 if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
1096d63d
RJ
286 dma_unmap_single(&adapter->vdev->dev,
287 pool->dma_addr[index], pool->buff_size,
288 DMA_FROM_DEVICE);
289 dev_kfree_skb_any(skb);
290 adapter->replenish_add_buff_failure++;
291
1da177e4
LT
292 mb();
293 atomic_add(buffers_added, &(pool->available));
294}
295
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

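	/* the final 8 bytes of the buffer list page hold the firmware's
	 * count of frames it had no receive buffer for; snapshot it into
	 * the rx_no_buffer statistic
	 */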
	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

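/* undo everything ibmveth_open() set up: unmap and free the buffer list,
 * filter list and receive queue, and release the buffer pools and the
 * bounce buffer
 */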
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
			dma_unmap_single(dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
						GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

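	/* the 6-byte MAC lands in the most significant bytes of the
	 * (big-endian) u64, so right-justify it for the hcall below
	 */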
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}

static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr6, clr_attr6,
						 &ret_attr);
		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret != H_SUCCESS || ret6 != H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev, u32 features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

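/* hand up to six descriptors to the hypervisor; H_BUSY is retried up to
 * retry_count times, and H_DROPPED (frame discarded by the hypervisor) is
 * not reported as a send failure
 */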
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long dma_addr;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | frag->size;
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

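			/* frames below rx_copybreak are copied into a fresh
			 * skb so the pool buffer can be recycled in place;
			 * larger frames are harvested from the pool and
			 * passed up directly
			 */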
			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				skb = new_skb;
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

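/* a larger MTU may need a larger buffer pool; every pool is re-examined
 * below, and if the device is running this means a close/reopen cycle
 */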
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int __devinit ibmveth_probe(struct vio_dev *dev,
				   const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
	 * ignored) while newer boxes' OF return a 6-byte field. Note that
	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
	 * The RPA doc specifies that the first byte must be 10b, so we'll
	 * just look for it to solve this 8 vs. 6 byte field issue
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

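/*
 * sysfs interface: every receive buffer pool appears under the vio device
 * as pool%d with writable active, num and size attributes (see
 * ibmveth_probe)
 */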
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
		.pm	= &ibmveth_pm_ops,
	}
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);