Commit | Line | Data |
---|---|---|
1 | /**************************************************************************/ | |
2 | /* */ | |
3 | /* IBM eServer i/pSeries Virtual Ethernet Device Driver */ | |
4 | /* Copyright (C) 2003 IBM Corp. */ | |
5 | /* Originally written by Dave Larson (larson1@us.ibm.com) */ | |
6 | /* Maintained by Santiago Leon (santil@us.ibm.com) */ | |
7 | /* */ | |
8 | /* This program is free software; you can redistribute it and/or modify */ | |
9 | /* it under the terms of the GNU General Public License as published by */ | |
10 | /* the Free Software Foundation; either version 2 of the License, or */ | |
11 | /* (at your option) any later version. */ | |
12 | /* */ | |
13 | /* This program is distributed in the hope that it will be useful, */ | |
14 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | |
15 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | |
16 | /* GNU General Public License for more details. */ | |
17 | /* */ | |
18 | /* You should have received a copy of the GNU General Public License */ | |
19 | /* along with this program; if not, write to the Free Software */ | |
20 | /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */ | |
21 | /* USA */ | |
22 | /* */ | |
23 | /* This module contains the implementation of a virtual ethernet device */ | |
24 | /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ | |
25 | /* option of the RS/6000 Platform Architecture to interface with virtual  */ | |
26 | /* ethernet NICs that are presented to the partition by the hypervisor. */ | |
27 | /* */ | |
28 | /**************************************************************************/ | |
29 | /* | |
30 | TODO: | |
31 | - remove frag processing code - no longer needed | |
32 | - add support for sysfs | |
33 | - possibly remove procfs support | |
34 | */ | |
35 | ||
36 | #include <linux/module.h> | |
37 | #include <linux/types.h> | |
38 | #include <linux/errno.h> | |
39 | #include <linux/ioport.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/kernel.h> | |
42 | #include <linux/netdevice.h> | |
43 | #include <linux/etherdevice.h> | |
44 | #include <linux/skbuff.h> | |
45 | #include <linux/init.h> | |
46 | #include <linux/delay.h> | |
47 | #include <linux/mm.h> | |
48 | #include <linux/ethtool.h> | |
49 | #include <linux/proc_fs.h> | |
50 | #include <net/net_namespace.h> | |
51 | #include <asm/semaphore.h> | |
52 | #include <asm/hvcall.h> | |
53 | #include <asm/atomic.h> | |
54 | #include <asm/vio.h> | |
55 | #include <asm/uaccess.h> | |
56 | #include <linux/seq_file.h> | |
57 | ||
58 | #include "ibmveth.h" | |
59 | ||
60 | #undef DEBUG | |
61 | ||
62 | #define ibmveth_printk(fmt, args...) \ | |
63 | printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args) | |
64 | ||
65 | #define ibmveth_error_printk(fmt, args...) \ | |
66 | printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args) | |
67 | ||
68 | #ifdef DEBUG | |
69 | #define ibmveth_debug_printk_no_adapter(fmt, args...) \ | |
70 | printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args) | |
71 | #define ibmveth_debug_printk(fmt, args...) \ | |
72 | printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args) | |
73 | #define ibmveth_assert(expr) \ | |
74 | if(!(expr)) { \ | |
75 | printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \ | |
76 | BUG(); \ | |
77 | } | |
78 | #else | |
79 | #define ibmveth_debug_printk_no_adapter(fmt, args...) | |
80 | #define ibmveth_debug_printk(fmt, args...) | |
81 | #define ibmveth_assert(expr) | |
82 | #endif | |
83 | ||
84 | static int ibmveth_open(struct net_device *dev); | |
85 | static int ibmveth_close(struct net_device *dev); | |
86 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | |
87 | static int ibmveth_poll(struct napi_struct *napi, int budget); | |
88 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
89 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); | |
90 | static void ibmveth_set_multicast_list(struct net_device *dev); | |
91 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu); | |
92 | static void ibmveth_proc_register_driver(void); | |
93 | static void ibmveth_proc_unregister_driver(void); | |
94 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); | |
95 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); | |
96 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); | |
97 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); | |
98 | static struct kobj_type ktype_veth_pool; | |
99 | ||
100 | #ifdef CONFIG_PROC_FS | |
101 | #define IBMVETH_PROC_DIR "ibmveth" | |
102 | static struct proc_dir_entry *ibmveth_proc_dir; | |
103 | #endif | |
104 | ||
105 | static const char ibmveth_driver_name[] = "ibmveth"; | |
106 | static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver"; | |
107 | #define ibmveth_driver_version "1.03" | |
108 | ||
109 | MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>"); | |
110 | MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver"); | |
111 | MODULE_LICENSE("GPL"); | |
112 | MODULE_VERSION(ibmveth_driver_version); | |
113 | ||
114 | /* simple methods of getting data from the current rxq entry */ | |
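 | /* an entry is ready when its toggle bit matches the value the driver currently expects; the expected toggle flips each time the queue index wraps (see the recycle/harvest helpers below) */ | |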
115 | static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) | |
116 | { | |
117 | return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle); | |
118 | } | |
119 | ||
120 | static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) | |
121 | { | |
122 | return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid); | |
123 | } | |
124 | ||
125 | static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) | |
126 | { | |
127 | return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset); | |
128 | } | |
129 | ||
130 | static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) | |
131 | { | |
132 | return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); | |
133 | } | |
134 | ||
135 | /* setup the initial settings for a buffer pool */ | |
136 | static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) | |
137 | { | |
138 | pool->size = pool_size; | |
139 | pool->index = pool_index; | |
140 | pool->buff_size = buff_size; | |
141 | pool->threshold = pool_size / 2; | |
142 | pool->active = pool_active; | |
143 | } | |
144 | ||
145 | /* allocate and set up a buffer pool - called during open */ | |
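 | /* free_map is a ring of free buffer-slot indices: replenish takes entries at consumer_index, and slots freed by ibmveth_remove_buffer_from_pool are returned at producer_index */ | |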
146 | static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |
147 | { | |
148 | int i; | |
149 | ||
150 | pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); | |
151 | ||
152 | if(!pool->free_map) { | |
153 | return -1; | |
154 | } | |
155 | ||
156 | pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); | |
157 | if(!pool->dma_addr) { | |
158 | kfree(pool->free_map); | |
159 | pool->free_map = NULL; | |
160 | return -1; | |
161 | } | |
162 | ||
163 | pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL); | |
164 | ||
165 | if(!pool->skbuff) { | |
166 | kfree(pool->dma_addr); | |
167 | pool->dma_addr = NULL; | |
168 | ||
169 | kfree(pool->free_map); | |
170 | pool->free_map = NULL; | |
171 | return -1; | |
172 | } | |
173 | ||
174 | memset(pool->skbuff, 0, sizeof(void*) * pool->size); | |
175 | memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); | |
176 | ||
177 | for(i = 0; i < pool->size; ++i) { | |
178 | pool->free_map[i] = i; | |
179 | } | |
180 | ||
181 | atomic_set(&pool->available, 0); | |
182 | pool->producer_index = 0; | |
183 | pool->consumer_index = 0; | |
184 | ||
185 | return 0; | |
186 | } | |
187 | ||
188 | /* replenish the buffers for a pool. note that we don't need to | |
189 | * skb_reserve these since they are used for incoming... | |
190 | */ | |
191 | static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) | |
192 | { | |
193 | u32 i; | |
194 | u32 count = pool->size - atomic_read(&pool->available); | |
195 | u32 buffers_added = 0; | |
196 | ||
197 | mb(); | |
198 | ||
199 | for(i = 0; i < count; ++i) { | |
200 | struct sk_buff *skb; | |
201 | unsigned int free_index, index; | |
202 | u64 correlator; | |
203 | union ibmveth_buf_desc desc; | |
204 | unsigned long lpar_rc; | |
205 | dma_addr_t dma_addr; | |
206 | ||
207 | skb = alloc_skb(pool->buff_size, GFP_ATOMIC); | |
208 | ||
209 | if(!skb) { | |
210 | ibmveth_debug_printk("replenish: unable to allocate skb\n"); | |
211 | adapter->replenish_no_mem++; | |
212 | break; | |
213 | } | |
214 | ||
215 | free_index = pool->consumer_index; | |
216 | pool->consumer_index = (pool->consumer_index + 1) % pool->size; | |
217 | index = pool->free_map[free_index]; | |
218 | ||
219 | ibmveth_assert(index != IBM_VETH_INVALID_MAP); | |
220 | ibmveth_assert(pool->skbuff[index] == NULL); | |
221 | ||
222 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | |
223 | pool->buff_size, DMA_FROM_DEVICE); | |
224 | ||
225 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; | |
226 | pool->dma_addr[index] = dma_addr; | |
227 | pool->skbuff[index] = skb; | |
228 | ||
229 | correlator = ((u64)pool->index << 32) | index; | |
230 | *(u64*)skb->data = correlator; | |
231 | ||
232 | desc.desc = 0; | |
233 | desc.fields.valid = 1; | |
234 | desc.fields.length = pool->buff_size; | |
235 | desc.fields.address = dma_addr; | |
236 | ||
237 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | |
238 | ||
239 | if(lpar_rc != H_SUCCESS) { | |
240 | pool->free_map[free_index] = index; | |
241 | pool->skbuff[index] = NULL; | |
242 | if (pool->consumer_index == 0) | |
243 | pool->consumer_index = pool->size - 1; | |
244 | else | |
245 | pool->consumer_index--; | |
246 | dma_unmap_single(&adapter->vdev->dev, | |
247 | pool->dma_addr[index], pool->buff_size, | |
248 | DMA_FROM_DEVICE); | |
249 | dev_kfree_skb_any(skb); | |
250 | adapter->replenish_add_buff_failure++; | |
251 | break; | |
252 | } else { | |
253 | buffers_added++; | |
254 | adapter->replenish_add_buff_success++; | |
255 | } | |
256 | } | |
257 | ||
258 | mb(); | |
259 | atomic_add(buffers_added, &(pool->available)); | |
260 | } | |
261 | ||
262 | /* replenish routine */ | |
263 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |
264 | { | |
265 | int i; | |
266 | ||
267 | adapter->replenish_task_cycles++; | |
268 | ||
269 | for(i = 0; i < IbmVethNumBufferPools; i++) | |
270 | if(adapter->rx_buff_pool[i].active) | |
271 | ibmveth_replenish_buffer_pool(adapter, | |
272 | &adapter->rx_buff_pool[i]); | |
273 | ||
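 | /* the last 8 bytes of the buffer list page hold a firmware-maintained count of frames dropped because no receive buffer was available */ | |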
274 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); | |
275 | } | |
276 | ||
277 | /* empty and free a buffer pool - also used to do cleanup in error paths */ | |
278 | static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) | |
279 | { | |
280 | int i; | |
281 | ||
282 | kfree(pool->free_map); | |
283 | pool->free_map = NULL; | |
284 | ||
285 | if(pool->skbuff && pool->dma_addr) { | |
286 | for(i = 0; i < pool->size; ++i) { | |
287 | struct sk_buff *skb = pool->skbuff[i]; | |
288 | if(skb) { | |
289 | dma_unmap_single(&adapter->vdev->dev, | |
290 | pool->dma_addr[i], | |
291 | pool->buff_size, | |
292 | DMA_FROM_DEVICE); | |
293 | dev_kfree_skb_any(skb); | |
294 | pool->skbuff[i] = NULL; | |
295 | } | |
296 | } | |
297 | } | |
298 | ||
299 | if(pool->dma_addr) { | |
300 | kfree(pool->dma_addr); | |
301 | pool->dma_addr = NULL; | |
302 | } | |
303 | ||
304 | if(pool->skbuff) { | |
305 | kfree(pool->skbuff); | |
306 | pool->skbuff = NULL; | |
307 | } | |
308 | } | |
309 | ||
310 | /* remove a buffer from a pool */ | |
311 | static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator) | |
312 | { | |
313 | unsigned int pool = correlator >> 32; | |
314 | unsigned int index = correlator & 0xffffffffUL; | |
315 | unsigned int free_index; | |
316 | struct sk_buff *skb; | |
317 | ||
318 | ibmveth_assert(pool < IbmVethNumBufferPools); | |
319 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | |
320 | ||
321 | skb = adapter->rx_buff_pool[pool].skbuff[index]; | |
322 | ||
323 | ibmveth_assert(skb != NULL); | |
324 | ||
325 | adapter->rx_buff_pool[pool].skbuff[index] = NULL; | |
326 | ||
327 | dma_unmap_single(&adapter->vdev->dev, | |
328 | adapter->rx_buff_pool[pool].dma_addr[index], | |
329 | adapter->rx_buff_pool[pool].buff_size, | |
330 | DMA_FROM_DEVICE); | |
331 | ||
332 | free_index = adapter->rx_buff_pool[pool].producer_index; | |
333 | adapter->rx_buff_pool[pool].producer_index | |
334 | = (adapter->rx_buff_pool[pool].producer_index + 1) | |
335 | % adapter->rx_buff_pool[pool].size; | |
336 | adapter->rx_buff_pool[pool].free_map[free_index] = index; | |
337 | ||
338 | mb(); | |
339 | ||
340 | atomic_dec(&(adapter->rx_buff_pool[pool].available)); | |
341 | } | |
342 | ||
343 | /* get the current buffer on the rx queue */ | |
344 | static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter) | |
345 | { | |
346 | u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; | |
347 | unsigned int pool = correlator >> 32; | |
348 | unsigned int index = correlator & 0xffffffffUL; | |
349 | ||
350 | ibmveth_assert(pool < IbmVethNumBufferPools); | |
351 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | |
352 | ||
353 | return adapter->rx_buff_pool[pool].skbuff[index]; | |
354 | } | |
355 | ||
356 | /* recycle the current buffer on the rx queue */ | |
357 | static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |
358 | { | |
359 | u32 q_index = adapter->rx_queue.index; | |
360 | u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; | |
361 | unsigned int pool = correlator >> 32; | |
362 | unsigned int index = correlator & 0xffffffffUL; | |
363 | union ibmveth_buf_desc desc; | |
364 | unsigned long lpar_rc; | |
365 | ||
366 | ibmveth_assert(pool < IbmVethNumBufferPools); | |
367 | ibmveth_assert(index < adapter->rx_buff_pool[pool].size); | |
368 | ||
369 | if(!adapter->rx_buff_pool[pool].active) { | |
370 | ibmveth_rxq_harvest_buffer(adapter); | |
371 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); | |
372 | return; | |
373 | } | |
374 | ||
375 | desc.desc = 0; | |
376 | desc.fields.valid = 1; | |
377 | desc.fields.length = adapter->rx_buff_pool[pool].buff_size; | |
378 | desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; | |
379 | ||
380 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | |
381 | ||
382 | if(lpar_rc != H_SUCCESS) { | |
383 | ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld\n", lpar_rc); | |
384 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | |
385 | } | |
386 | ||
387 | if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | |
388 | adapter->rx_queue.index = 0; | |
389 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | |
390 | } | |
391 | } | |
392 | ||
393 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |
394 | { | |
395 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | |
396 | ||
397 | if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | |
398 | adapter->rx_queue.index = 0; | |
399 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | |
400 | } | |
401 | } | |
402 | ||
403 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |
404 | { | |
405 | int i; | |
406 | ||
407 | if(adapter->buffer_list_addr != NULL) { | |
408 | if(!dma_mapping_error(adapter->buffer_list_dma)) { | |
409 | dma_unmap_single(&adapter->vdev->dev, | |
410 | adapter->buffer_list_dma, 4096, | |
411 | DMA_BIDIRECTIONAL); | |
412 | adapter->buffer_list_dma = DMA_ERROR_CODE; | |
413 | } | |
414 | free_page((unsigned long)adapter->buffer_list_addr); | |
415 | adapter->buffer_list_addr = NULL; | |
416 | } | |
417 | ||
418 | if(adapter->filter_list_addr != NULL) { | |
419 | if(!dma_mapping_error(adapter->filter_list_dma)) { | |
420 | dma_unmap_single(&adapter->vdev->dev, | |
421 | adapter->filter_list_dma, 4096, | |
422 | DMA_BIDIRECTIONAL); | |
423 | adapter->filter_list_dma = DMA_ERROR_CODE; | |
424 | } | |
425 | free_page((unsigned long)adapter->filter_list_addr); | |
426 | adapter->filter_list_addr = NULL; | |
427 | } | |
428 | ||
429 | if(adapter->rx_queue.queue_addr != NULL) { | |
430 | if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { | |
431 | dma_unmap_single(&adapter->vdev->dev, | |
432 | adapter->rx_queue.queue_dma, | |
433 | adapter->rx_queue.queue_len, | |
434 | DMA_BIDIRECTIONAL); | |
435 | adapter->rx_queue.queue_dma = DMA_ERROR_CODE; | |
436 | } | |
437 | kfree(adapter->rx_queue.queue_addr); | |
438 | adapter->rx_queue.queue_addr = NULL; | |
439 | } | |
440 | ||
441 | for(i = 0; i<IbmVethNumBufferPools; i++) | |
442 | if (adapter->rx_buff_pool[i].active) | |
443 | ibmveth_free_buffer_pool(adapter, | |
444 | &adapter->rx_buff_pool[i]); | |
445 | } | |
446 | ||
447 | static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, | |
448 | union ibmveth_buf_desc rxq_desc, u64 mac_address) | |
449 | { | |
450 | int rc, try_again = 1; | |
451 | ||
452 | /* After a kexec the adapter will still be open, so our attempt to | |
453 | * open it will fail. If that happens we free the logical LAN with | |
454 | * h_free_logical_lan() and try again, but only once. */ | |
455 | retry: | |
456 | rc = h_register_logical_lan(adapter->vdev->unit_address, | |
457 | adapter->buffer_list_dma, rxq_desc.desc, | |
458 | adapter->filter_list_dma, mac_address); | |
459 | ||
460 | if (rc != H_SUCCESS && try_again) { | |
461 | do { | |
462 | rc = h_free_logical_lan(adapter->vdev->unit_address); | |
463 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | |
464 | ||
465 | try_again = 0; | |
466 | goto retry; | |
467 | } | |
468 | ||
469 | return rc; | |
470 | } | |
471 | ||
472 | static int ibmveth_open(struct net_device *netdev) | |
473 | { | |
474 | struct ibmveth_adapter *adapter = netdev->priv; | |
475 | u64 mac_address = 0; | |
476 | int rxq_entries = 1; | |
477 | unsigned long lpar_rc; | |
478 | int rc; | |
479 | union ibmveth_buf_desc rxq_desc; | |
480 | int i; | |
481 | ||
482 | ibmveth_debug_printk("open starting\n"); | |
483 | ||
484 | napi_enable(&adapter->napi); | |
485 | ||
486 | for(i = 0; i<IbmVethNumBufferPools; i++) | |
487 | rxq_entries += adapter->rx_buff_pool[i].size; | |
488 | ||
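 | /* hand the hypervisor one 4K page for the buffer list and one for the filter list; both are DMA-mapped below and registered via h_register_logical_lan */ | |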
489 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | |
490 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | |
491 | ||
492 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { | |
493 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); | |
494 | ibmveth_cleanup(adapter); | |
495 | napi_disable(&adapter->napi); | |
496 | return -ENOMEM; | |
497 | } | |
498 | ||
499 | adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; | |
500 | adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL); | |
501 | ||
502 | if(!adapter->rx_queue.queue_addr) { | |
503 | ibmveth_error_printk("unable to allocate rx queue pages\n"); | |
504 | ibmveth_cleanup(adapter); | |
505 | napi_disable(&adapter->napi); | |
506 | return -ENOMEM; | |
507 | } | |
508 | ||
509 | adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, | |
510 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); | |
511 | adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, | |
512 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); | |
513 | adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, | |
514 | adapter->rx_queue.queue_addr, | |
515 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); | |
516 | ||
517 | if((dma_mapping_error(adapter->buffer_list_dma) ) || | |
518 | (dma_mapping_error(adapter->filter_list_dma)) || | |
519 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | |
520 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | |
521 | ibmveth_cleanup(adapter); | |
522 | napi_disable(&adapter->napi); | |
523 | return -ENOMEM; | |
524 | } | |
525 | ||
526 | adapter->rx_queue.index = 0; | |
527 | adapter->rx_queue.num_slots = rxq_entries; | |
528 | adapter->rx_queue.toggle = 1; | |
529 | ||
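 | /* copy the 6-byte MAC into the most significant bytes of the u64 (big-endian) and shift it down so the address sits in the low 48 bits passed to h_register_logical_lan */ | |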
530 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); | |
531 | mac_address = mac_address >> 16; | |
532 | ||
533 | rxq_desc.desc = 0; | |
534 | rxq_desc.fields.valid = 1; | |
535 | rxq_desc.fields.length = adapter->rx_queue.queue_len; | |
536 | rxq_desc.fields.address = adapter->rx_queue.queue_dma; | |
537 | ||
538 | ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); | |
539 | ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); | |
540 | ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); | |
541 | ||
542 | h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | |
543 | ||
544 | lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); | |
545 | ||
546 | if(lpar_rc != H_SUCCESS) { | |
547 | ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); | |
548 | ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n", | |
549 | adapter->buffer_list_dma, | |
550 | adapter->filter_list_dma, | |
551 | rxq_desc.desc, | |
552 | mac_address); | |
553 | ibmveth_cleanup(adapter); | |
554 | napi_disable(&adapter->napi); | |
555 | return -ENONET; | |
556 | } | |
557 | ||
558 | for(i = 0; i<IbmVethNumBufferPools; i++) { | |
559 | if(!adapter->rx_buff_pool[i].active) | |
560 | continue; | |
561 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { | |
562 | ibmveth_error_printk("unable to alloc pool\n"); | |
563 | adapter->rx_buff_pool[i].active = 0; | |
564 | ibmveth_cleanup(adapter); | |
565 | napi_disable(&adapter->napi); | |
566 | return -ENOMEM; | |
567 | } | |
568 | } | |
569 | ||
570 | ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); | |
571 | if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) { | |
572 | ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); | |
573 | do { | |
574 | rc = h_free_logical_lan(adapter->vdev->unit_address); | |
575 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | |
576 | ||
577 | ibmveth_cleanup(adapter); | |
578 | napi_disable(&adapter->napi); | |
579 | return rc; | |
580 | } | |
581 | ||
582 | ibmveth_debug_printk("initial replenish cycle\n"); | |
583 | ibmveth_interrupt(netdev->irq, netdev); | |
584 | ||
585 | netif_start_queue(netdev); | |
586 | ||
587 | ibmveth_debug_printk("open complete\n"); | |
588 | ||
589 | return 0; | |
590 | } | |
591 | ||
592 | static int ibmveth_close(struct net_device *netdev) | |
593 | { | |
594 | struct ibmveth_adapter *adapter = netdev->priv; | |
595 | long lpar_rc; | |
596 | ||
597 | ibmveth_debug_printk("close starting\n"); | |
598 | ||
599 | napi_disable(&adapter->napi); | |
600 | ||
601 | if (!adapter->pool_config) | |
602 | netif_stop_queue(netdev); | |
603 | ||
604 | free_irq(netdev->irq, netdev); | |
605 | ||
606 | do { | |
607 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); | |
608 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); | |
609 | ||
610 | if(lpar_rc != H_SUCCESS) | |
611 | { | |
612 | ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", | |
613 | lpar_rc); | |
614 | } | |
615 | ||
616 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); | |
617 | ||
618 | ibmveth_cleanup(adapter); | |
619 | ||
620 | ibmveth_debug_printk("close complete\n"); | |
621 | ||
622 | return 0; | |
623 | } | |
624 | ||
625 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { | |
626 | cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); | |
627 | cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); | |
628 | cmd->speed = SPEED_1000; | |
629 | cmd->duplex = DUPLEX_FULL; | |
630 | cmd->port = PORT_FIBRE; | |
631 | cmd->phy_address = 0; | |
632 | cmd->transceiver = XCVR_INTERNAL; | |
633 | cmd->autoneg = AUTONEG_ENABLE; | |
634 | cmd->maxtxpkt = 0; | |
635 | cmd->maxrxpkt = 1; | |
636 | return 0; | |
637 | } | |
638 | ||
639 | static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { | |
640 | strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); | |
641 | strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1); | |
642 | } | |
643 | ||
644 | static u32 netdev_get_link(struct net_device *dev) { | |
645 | return 1; | |
646 | } | |
647 | ||
648 | static const struct ethtool_ops netdev_ethtool_ops = { | |
649 | .get_drvinfo = netdev_get_drvinfo, | |
650 | .get_settings = netdev_get_settings, | |
651 | .get_link = netdev_get_link, | |
652 | .get_sg = ethtool_op_get_sg, | |
653 | .get_tx_csum = ethtool_op_get_tx_csum, | |
654 | }; | |
655 | ||
656 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |
657 | { | |
658 | return -EOPNOTSUPP; | |
659 | } | |
660 | ||
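 | /* byte offset of an address within a 4K page */ | |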
661 | #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) | |
662 | ||
663 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |
664 | { | |
665 | struct ibmveth_adapter *adapter = netdev->priv; | |
666 | union ibmveth_buf_desc desc[IbmVethMaxSendFrags]; | |
667 | unsigned long lpar_rc; | |
668 | int nfrags = 0, curfrag; | |
669 | unsigned long correlator; | |
670 | unsigned long flags; | |
671 | unsigned int retry_count; | |
672 | unsigned int tx_dropped = 0; | |
673 | unsigned int tx_bytes = 0; | |
674 | unsigned int tx_packets = 0; | |
675 | unsigned int tx_send_failed = 0; | |
676 | unsigned int tx_map_failed = 0; | |
677 | ||
678 | ||
679 | if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { | |
680 | tx_dropped++; | |
681 | goto out; | |
682 | } | |
683 | ||
684 | memset(&desc, 0, sizeof(desc)); | |
685 | ||
686 | /* nfrags = number of frags after the initial fragment */ | |
687 | nfrags = skb_shinfo(skb)->nr_frags; | |
688 | ||
689 | if(nfrags) | |
690 | adapter->tx_multidesc_send++; | |
691 | ||
692 | /* map the initial fragment */ | |
693 | desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len; | |
694 | desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, | |
695 | desc[0].fields.length, DMA_TO_DEVICE); | |
696 | desc[0].fields.valid = 1; | |
697 | ||
698 | if(dma_mapping_error(desc[0].fields.address)) { | |
699 | ibmveth_error_printk("tx: unable to map initial fragment\n"); | |
700 | tx_map_failed++; | |
701 | tx_dropped++; | |
702 | goto out; | |
703 | } | |
704 | ||
705 | curfrag = nfrags; | |
706 | ||
707 | /* map fragments past the initial portion if there are any */ | |
708 | while(curfrag--) { | |
709 | skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag]; | |
710 | desc[curfrag+1].fields.address | |
711 | = dma_map_single(&adapter->vdev->dev, | |
712 | page_address(frag->page) + frag->page_offset, | |
713 | frag->size, DMA_TO_DEVICE); | |
714 | desc[curfrag+1].fields.length = frag->size; | |
715 | desc[curfrag+1].fields.valid = 1; | |
716 | ||
717 | if(dma_mapping_error(desc[curfrag+1].fields.address)) { | |
718 | ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); | |
719 | tx_map_failed++; | |
720 | tx_dropped++; | |
721 | /* Free all the mappings we just created */ | |
722 | while(curfrag < nfrags) { | |
723 | dma_unmap_single(&adapter->vdev->dev, | |
724 | desc[curfrag+1].fields.address, | |
725 | desc[curfrag+1].fields.length, | |
726 | DMA_TO_DEVICE); | |
727 | curfrag++; | |
728 | } | |
729 | goto out; | |
730 | } | |
731 | } | |
732 | ||
733 | /* send the frame. Arbitrarily set retry_count to 1024 */ | |
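 | /* h_send_logical_lan always takes six descriptor slots; slots beyond nfrags remain zero from the memset above */ | |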
734 | correlator = 0; | |
735 | retry_count = 1024; | |
736 | do { | |
737 | lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, | |
738 | desc[0].desc, | |
739 | desc[1].desc, | |
740 | desc[2].desc, | |
741 | desc[3].desc, | |
742 | desc[4].desc, | |
743 | desc[5].desc, | |
744 | correlator, | |
745 | &correlator); | |
746 | } while ((lpar_rc == H_BUSY) && (retry_count--)); | |
747 | ||
748 | if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { | |
749 | int i; | |
750 | ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); | |
751 | for(i = 0; i < 6; i++) { | |
752 | ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i, | |
753 | desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); | |
754 | } | |
755 | tx_send_failed++; | |
756 | tx_dropped++; | |
757 | } else { | |
758 | tx_packets++; | |
759 | tx_bytes += skb->len; | |
760 | netdev->trans_start = jiffies; | |
761 | } | |
762 | ||
763 | do { | |
764 | dma_unmap_single(&adapter->vdev->dev, | |
765 | desc[nfrags].fields.address, | |
766 | desc[nfrags].fields.length, DMA_TO_DEVICE); | |
767 | } while(--nfrags >= 0); | |
768 | ||
769 | out: spin_lock_irqsave(&adapter->stats_lock, flags); | |
770 | adapter->stats.tx_dropped += tx_dropped; | |
771 | adapter->stats.tx_bytes += tx_bytes; | |
772 | adapter->stats.tx_packets += tx_packets; | |
773 | adapter->tx_send_failed += tx_send_failed; | |
774 | adapter->tx_map_failed += tx_map_failed; | |
775 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
776 | ||
777 | dev_kfree_skb(skb); | |
778 | return 0; | |
779 | } | |
780 | ||
781 | static int ibmveth_poll(struct napi_struct *napi, int budget) | |
782 | { | |
783 | struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); | |
784 | struct net_device *netdev = adapter->netdev; | |
785 | int frames_processed = 0; | |
786 | unsigned long lpar_rc; | |
787 | ||
788 | restart_poll: | |
789 | do { | |
790 | struct sk_buff *skb; | |
791 | ||
792 | if (!ibmveth_rxq_pending_buffer(adapter)) | |
793 | break; | |
794 | ||
795 | rmb(); | |
796 | if (!ibmveth_rxq_buffer_valid(adapter)) { | |
797 | wmb(); /* suggested by larson1 */ | |
798 | adapter->rx_invalid_buffer++; | |
799 | ibmveth_debug_printk("recycling invalid buffer\n"); | |
800 | ibmveth_rxq_recycle_buffer(adapter); | |
801 | } else { | |
802 | int length = ibmveth_rxq_frame_length(adapter); | |
803 | int offset = ibmveth_rxq_frame_offset(adapter); | |
804 | skb = ibmveth_rxq_get_buffer(adapter); | |
805 | ||
806 | ibmveth_rxq_harvest_buffer(adapter); | |
807 | ||
808 | skb_reserve(skb, offset); | |
809 | skb_put(skb, length); | |
810 | skb->protocol = eth_type_trans(skb, netdev); | |
811 | ||
812 | netif_receive_skb(skb); /* send it up */ | |
813 | ||
814 | adapter->stats.rx_packets++; | |
815 | adapter->stats.rx_bytes += length; | |
816 | frames_processed++; | |
817 | netdev->last_rx = jiffies; | |
818 | } | |
819 | } while (frames_processed < budget); | |
820 | ||
821 | ibmveth_replenish_task(adapter); | |
822 | ||
823 | if (frames_processed < budget) { | |
824 | /* We think we are done - reenable interrupts, | |
825 | * then check once more to make sure we are done. | |
826 | */ | |
827 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | |
828 | VIO_IRQ_ENABLE); | |
829 | ||
830 | ibmveth_assert(lpar_rc == H_SUCCESS); | |
831 | ||
832 | netif_rx_complete(netdev, napi); | |
833 | ||
834 | if (ibmveth_rxq_pending_buffer(adapter) && | |
835 | netif_rx_reschedule(netdev, napi)) { | |
836 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | |
837 | VIO_IRQ_DISABLE); | |
838 | goto restart_poll; | |
839 | } | |
840 | } | |
841 | ||
842 | return frames_processed; | |
843 | } | |
844 | ||
845 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |
846 | { | |
847 | struct net_device *netdev = dev_instance; | |
848 | struct ibmveth_adapter *adapter = netdev->priv; | |
849 | unsigned long lpar_rc; | |
850 | ||
851 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { | |
852 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | |
853 | VIO_IRQ_DISABLE); | |
854 | ibmveth_assert(lpar_rc == H_SUCCESS); | |
855 | __netif_rx_schedule(netdev, &adapter->napi); | |
856 | } | |
857 | return IRQ_HANDLED; | |
858 | } | |
859 | ||
860 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev) | |
861 | { | |
862 | struct ibmveth_adapter *adapter = dev->priv; | |
863 | return &adapter->stats; | |
864 | } | |
865 | ||
866 | static void ibmveth_set_multicast_list(struct net_device *netdev) | |
867 | { | |
868 | struct ibmveth_adapter *adapter = netdev->priv; | |
869 | unsigned long lpar_rc; | |
870 | ||
871 | if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) { | |
872 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
873 | IbmVethMcastEnableRecv | | |
874 | IbmVethMcastDisableFiltering, | |
875 | 0); | |
876 | if(lpar_rc != H_SUCCESS) { | |
877 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); | |
878 | } | |
879 | } else { | |
880 | struct dev_mc_list *mclist = netdev->mc_list; | |
881 | int i; | |
882 | /* clear the filter table & disable filtering */ | |
883 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
884 | IbmVethMcastEnableRecv | | |
885 | IbmVethMcastDisableFiltering | | |
886 | IbmVethMcastClearFilterTable, | |
887 | 0); | |
888 | if(lpar_rc != H_SUCCESS) { | |
889 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); | |
890 | } | |
891 | /* add the addresses to the filter table */ | |
892 | for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) { | |
893 | // add the multicast address to the filter table | |
894 | unsigned long mcast_addr = 0; | |
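 | /* place the 6-byte address in the low 48 bits of the 64-bit argument passed to h_multicast_ctrl */ | |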
895 | memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); | |
896 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
897 | IbmVethMcastAddFilter, | |
898 | mcast_addr); | |
899 | if(lpar_rc != H_SUCCESS) { | |
900 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); | |
901 | } | |
902 | } | |
903 | ||
904 | /* re-enable filtering */ | |
905 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
906 | IbmVethMcastEnableFiltering, | |
907 | 0); | |
908 | if(lpar_rc != H_SUCCESS) { | |
909 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); | |
910 | } | |
911 | } | |
912 | } | |
913 | ||
914 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |
915 | { | |
916 | struct ibmveth_adapter *adapter = dev->priv; | |
917 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; | |
918 | int reinit = 0; | |
919 | int i, rc; | |
920 | ||
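 | /* note: IBMVETH_MAX_MTU (from ibmveth.h) is used here as the smallest MTU the driver will accept */ | |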
921 | if (new_mtu < IBMVETH_MAX_MTU) | |
922 | return -EINVAL; | |
923 | ||
924 | for (i = 0; i < IbmVethNumBufferPools; i++) | |
925 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) | |
926 | break; | |
927 | ||
928 | if (i == IbmVethNumBufferPools) | |
929 | return -EINVAL; | |
930 | ||
931 | /* Activate any pools we pass over and use the first one large enough for the new MTU, reopening a running device if a pool had to be activated */ | |
932 | for(i = 0; i<IbmVethNumBufferPools; i++) { | |
933 | if (!adapter->rx_buff_pool[i].active) { | |
934 | adapter->rx_buff_pool[i].active = 1; | |
935 | reinit = 1; | |
936 | } | |
937 | ||
938 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | |
939 | if (reinit && netif_running(adapter->netdev)) { | |
940 | adapter->pool_config = 1; | |
941 | ibmveth_close(adapter->netdev); | |
942 | adapter->pool_config = 0; | |
943 | dev->mtu = new_mtu; | |
944 | if ((rc = ibmveth_open(adapter->netdev))) | |
945 | return rc; | |
946 | } else | |
947 | dev->mtu = new_mtu; | |
948 | return 0; | |
949 | } | |
950 | } | |
951 | return -EINVAL; | |
952 | } | |
953 | ||
954 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
955 | static void ibmveth_poll_controller(struct net_device *dev) | |
956 | { | |
957 | ibmveth_replenish_task(dev->priv); | |
958 | ibmveth_interrupt(dev->irq, dev); | |
959 | } | |
960 | #endif | |
961 | ||
962 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |
963 | { | |
964 | int rc, i; | |
965 | struct net_device *netdev; | |
966 | struct ibmveth_adapter *adapter; | |
967 | ||
968 | unsigned char *mac_addr_p; | |
969 | unsigned int *mcastFilterSize_p; | |
970 | ||
971 | ||
972 | ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", | |
973 | dev->unit_address); | |
974 | ||
975 | mac_addr_p = (unsigned char *) vio_get_attribute(dev, | |
976 | VETH_MAC_ADDR, NULL); | |
977 | if(!mac_addr_p) { | |
978 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR " | |
979 | "attribute\n", __FILE__, __LINE__); | |
980 | return 0; | |
981 | } | |
982 | ||
983 | mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, | |
984 | VETH_MCAST_FILTER_SIZE, NULL); | |
985 | if(!mcastFilterSize_p) { | |
986 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " | |
987 | "VETH_MCAST_FILTER_SIZE attribute\n", | |
988 | __FILE__, __LINE__); | |
989 | return 0; | |
990 | } | |
991 | ||
992 | netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); | |
993 | ||
994 | if(!netdev) | |
995 | return -ENOMEM; | |
996 | ||
997 | SET_MODULE_OWNER(netdev); | |
998 | ||
999 | adapter = netdev->priv; | |
1000 | dev->dev.driver_data = netdev; | |
1001 | ||
1002 | adapter->vdev = dev; | |
1003 | adapter->netdev = netdev; | |
1004 | adapter->mcastFilterSize= *mcastFilterSize_p; | |
1005 | adapter->pool_config = 0; | |
1006 | ||
1007 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | |
1008 | ||
1009 | /* Some older boxes running PHYP non-natively have an OF that | |
1010 | returns an 8-byte local-mac-address field (and the first | |
1011 | 2 bytes have to be ignored) while newer boxes' OF return | |
1012 | a 6-byte field. Note that IEEE 1275 specifies that | |
1013 | local-mac-address must be a 6-byte field. | |
1014 | The RPA doc specifies that the first byte must be 10b, so | |
1015 | we'll just look for it to solve this 8 vs. 6 byte field issue */ | |
1016 | ||
1017 | if ((*mac_addr_p & 0x3) != 0x02) | |
1018 | mac_addr_p += 2; | |
1019 | ||
1020 | adapter->mac_addr = 0; | |
1021 | memcpy(&adapter->mac_addr, mac_addr_p, 6); | |
1022 | ||
1023 | netdev->irq = dev->irq; | |
1024 | netdev->open = ibmveth_open; | |
1025 | netdev->stop = ibmveth_close; | |
1026 | netdev->hard_start_xmit = ibmveth_start_xmit; | |
1027 | netdev->get_stats = ibmveth_get_stats; | |
1028 | netdev->set_multicast_list = ibmveth_set_multicast_list; | |
1029 | netdev->do_ioctl = ibmveth_ioctl; | |
1030 | netdev->ethtool_ops = &netdev_ethtool_ops; | |
1031 | netdev->change_mtu = ibmveth_change_mtu; | |
1032 | SET_NETDEV_DEV(netdev, &dev->dev); | |
1033 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1034 | netdev->poll_controller = ibmveth_poll_controller; | |
1035 | #endif | |
1036 | netdev->features |= NETIF_F_LLTX; | |
1037 | spin_lock_init(&adapter->stats_lock); | |
1038 | ||
1039 | memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | |
1040 | ||
1041 | for(i = 0; i<IbmVethNumBufferPools; i++) { | |
1042 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; | |
1043 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | |
1044 | pool_count[i], pool_size[i], | |
1045 | pool_active[i]); | |
1046 | kobj->parent = &dev->dev.kobj; | |
1047 | sprintf(kobj->name, "pool%d", i); | |
1048 | kobj->ktype = &ktype_veth_pool; | |
1049 | kobject_register(kobj); | |
1050 | } | |
1051 | ||
1052 | ibmveth_debug_printk("adapter @ 0x%p\n", adapter); | |
1053 | ||
1054 | adapter->buffer_list_dma = DMA_ERROR_CODE; | |
1055 | adapter->filter_list_dma = DMA_ERROR_CODE; | |
1056 | adapter->rx_queue.queue_dma = DMA_ERROR_CODE; | |
1057 | ||
1058 | ibmveth_debug_printk("registering netdev...\n"); | |
1059 | ||
1060 | rc = register_netdev(netdev); | |
1061 | ||
1062 | if(rc) { | |
1063 | ibmveth_debug_printk("failed to register netdev rc=%d\n", rc); | |
1064 | free_netdev(netdev); | |
1065 | return rc; | |
1066 | } | |
1067 | ||
1068 | ibmveth_debug_printk("registered\n"); | |
1069 | ||
1070 | ibmveth_proc_register_adapter(adapter); | |
1071 | ||
1072 | return 0; | |
1073 | } | |
1074 | ||
1075 | static int __devexit ibmveth_remove(struct vio_dev *dev) | |
1076 | { | |
1077 | struct net_device *netdev = dev->dev.driver_data; | |
1078 | struct ibmveth_adapter *adapter = netdev->priv; | |
1079 | int i; | |
1080 | ||
1081 | for(i = 0; i<IbmVethNumBufferPools; i++) | |
1082 | kobject_unregister(&adapter->rx_buff_pool[i].kobj); | |
1083 | ||
1084 | unregister_netdev(netdev); | |
1085 | ||
1086 | ibmveth_proc_unregister_adapter(adapter); | |
1087 | ||
1088 | free_netdev(netdev); | |
1089 | return 0; | |
1090 | } | |
1091 | ||
1092 | #ifdef CONFIG_PROC_FS | |
1093 | static void ibmveth_proc_register_driver(void) | |
1094 | { | |
1095 | ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net); | |
1096 | if (ibmveth_proc_dir) { | |
1097 | SET_MODULE_OWNER(ibmveth_proc_dir); | |
1098 | } | |
1099 | } | |
1100 | ||
1101 | static void ibmveth_proc_unregister_driver(void) | |
1102 | { | |
1103 | remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); | |
1104 | } | |
1105 | ||
1106 | static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) | |
1107 | { | |
1108 | if (*pos == 0) { | |
1109 | return (void *)1; | |
1110 | } else { | |
1111 | return NULL; | |
1112 | } | |
1113 | } | |
1114 | ||
1115 | static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1116 | { | |
1117 | ++*pos; | |
1118 | return NULL; | |
1119 | } | |
1120 | ||
1121 | static void ibmveth_seq_stop(struct seq_file *seq, void *v) | |
1122 | { | |
1123 | } | |
1124 | ||
1125 | static int ibmveth_seq_show(struct seq_file *seq, void *v) | |
1126 | { | |
1127 | struct ibmveth_adapter *adapter = seq->private; | |
1128 | char *current_mac = ((char*) &adapter->netdev->dev_addr); | |
1129 | char *firmware_mac = ((char*) &adapter->mac_addr); | |
1130 | ||
1131 | seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); | |
1132 | ||
1133 | seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); | |
1134 | seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", | |
1135 | current_mac[0], current_mac[1], current_mac[2], | |
1136 | current_mac[3], current_mac[4], current_mac[5]); | |
1137 | seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", | |
1138 | firmware_mac[0], firmware_mac[1], firmware_mac[2], | |
1139 | firmware_mac[3], firmware_mac[4], firmware_mac[5]); | |
1140 | ||
1141 | seq_printf(seq, "\nAdapter Statistics:\n"); | |
1142 | seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized); | |
1143 | seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send); | |
1144 | seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed); | |
1145 | seq_printf(seq, " dma_map_single failures: %ld\n", adapter->tx_map_failed); | |
1146 | seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed); | |
1147 | seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles); | |
1148 | seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem); | |
1149 | seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure); | |
1150 | seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer); | |
1151 | seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer); | |
1152 | ||
1153 | return 0; | |
1154 | } | |
1155 | static struct seq_operations ibmveth_seq_ops = { | |
1156 | .start = ibmveth_seq_start, | |
1157 | .next = ibmveth_seq_next, | |
1158 | .stop = ibmveth_seq_stop, | |
1159 | .show = ibmveth_seq_show, | |
1160 | }; | |
1161 | ||
1162 | static int ibmveth_proc_open(struct inode *inode, struct file *file) | |
1163 | { | |
1164 | struct seq_file *seq; | |
1165 | struct proc_dir_entry *proc; | |
1166 | int rc; | |
1167 | ||
1168 | rc = seq_open(file, &ibmveth_seq_ops); | |
1169 | if (!rc) { | |
1170 | /* recover the pointer buried in proc_dir_entry data */ | |
1171 | seq = file->private_data; | |
1172 | proc = PDE(inode); | |
1173 | seq->private = proc->data; | |
1174 | } | |
1175 | return rc; | |
1176 | } | |
1177 | ||
1178 | static const struct file_operations ibmveth_proc_fops = { | |
1179 | .owner = THIS_MODULE, | |
1180 | .open = ibmveth_proc_open, | |
1181 | .read = seq_read, | |
1182 | .llseek = seq_lseek, | |
1183 | .release = seq_release, | |
1184 | }; | |
1185 | ||
1186 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | |
1187 | { | |
1188 | struct proc_dir_entry *entry; | |
1189 | if (ibmveth_proc_dir) { | |
1190 | char u_addr[10]; | |
1191 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | |
1192 | entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir); | |
1193 | if (!entry) { | |
1194 | ibmveth_error_printk("Cannot create adapter proc entry"); | |
1195 | } else { | |
1196 | entry->data = (void *) adapter; | |
1197 | entry->proc_fops = &ibmveth_proc_fops; | |
1198 | SET_MODULE_OWNER(entry); | |
1199 | } | |
1200 | } | |
1201 | return; | |
1202 | } | |
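 | /* the entry created above can then be read from userspace, e.g. | |
 |  * (the unit address below is illustrative): | |
 |  * cat /proc/net/ibmveth/30000002 | |
 |  */ | |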
1203 | ||
1204 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | |
1205 | { | |
1206 | if (ibmveth_proc_dir) { | |
1207 | char u_addr[10]; | |
1208 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | |
1209 | remove_proc_entry(u_addr, ibmveth_proc_dir); | |
1210 | } | |
1211 | } | |
1212 | ||
1213 | #else /* CONFIG_PROC_FS */ | |
1214 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | |
1215 | { | |
1216 | } | |
1217 | ||
1218 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | |
1219 | { | |
1220 | } | |
1221 | static void ibmveth_proc_register_driver(void) | |
1222 | { | |
1223 | } | |
1224 | ||
1225 | static void ibmveth_proc_unregister_driver(void) | |
1226 | { | |
1227 | } | |
1228 | #endif /* CONFIG_PROC_FS */ | |
1229 | ||
1230 | static struct attribute veth_active_attr; | |
1231 | static struct attribute veth_num_attr; | |
1232 | static struct attribute veth_size_attr; | |
1233 | ||
1234 | static ssize_t veth_pool_show(struct kobject * kobj, | |
1235 | struct attribute * attr, char * buf) | |
1236 | { | |
1237 | struct ibmveth_buff_pool *pool = container_of(kobj, | |
1238 | struct ibmveth_buff_pool, | |
1239 | kobj); | |
1240 | ||
1241 | if (attr == &veth_active_attr) | |
1242 | return sprintf(buf, "%d\n", pool->active); | |
1243 | else if (attr == &veth_num_attr) | |
1244 | return sprintf(buf, "%d\n", pool->size); | |
1245 | else if (attr == &veth_size_attr) | |
1246 | return sprintf(buf, "%d\n", pool->buff_size); | |
1247 | return 0; | |
1248 | } | |
1249 | ||
1250 | static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, | |
1251 | const char * buf, size_t count) | |
1252 | { | |
1253 | struct ibmveth_buff_pool *pool = container_of(kobj, | |
1254 | struct ibmveth_buff_pool, | |
1255 | kobj); | |
1256 | struct net_device *netdev = | |
1257 | container_of(kobj->parent, struct device, kobj)->driver_data; | |
1258 | struct ibmveth_adapter *adapter = netdev->priv; | |
1259 | long value = simple_strtol(buf, NULL, 10); | |
1260 | long rc; | |
1261 | ||
1262 | if (attr == &veth_active_attr) { | |
1263 | if (value && !pool->active) { | |
1264 | if (netif_running(netdev)) { | |
1265 | if(ibmveth_alloc_buffer_pool(pool)) { | |
1266 | ibmveth_error_printk("unable to alloc pool\n"); | |
1267 | return -ENOMEM; | |
1268 | } | |
1269 | pool->active = 1; | |
1270 | adapter->pool_config = 1; | |
1271 | ibmveth_close(netdev); | |
1272 | adapter->pool_config = 0; | |
1273 | if ((rc = ibmveth_open(netdev))) | |
1274 | return rc; | |
1275 | } else | |
1276 | pool->active = 1; | |
1277 | } else if (!value && pool->active) { | |
1278 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; | |
1279 | int i; | |
1280 | /* Make sure another buffer pool remains active that | |
1281 | can hold an MTU-sized packet */ | |
1282 | for (i = 0; i < IbmVethNumBufferPools; i++) { | |
1283 | if (pool == &adapter->rx_buff_pool[i]) | |
1284 | continue; | |
1285 | if (!adapter->rx_buff_pool[i].active) | |
1286 | continue; | |
1287 | if (mtu <= adapter->rx_buff_pool[i].buff_size) | |
1288 | break; | |
1289 | } | |
1290 | ||
1291 | if (i == IbmVethNumBufferPools) { | |
1292 | ibmveth_error_printk("no active pool >= MTU\n"); | |
1293 | return -EPERM; | |
1294 | } | |
1295 | ||
1296 | pool->active = 0; | |
1297 | if (netif_running(netdev)) { | |
1298 | adapter->pool_config = 1; | |
1299 | ibmveth_close(netdev); | |
1300 | adapter->pool_config = 0; | |
1301 | if ((rc = ibmveth_open(netdev))) | |
1302 | return rc; | |
1303 | } | |
1304 | } | |
1305 | } else if (attr == &veth_num_attr) { | |
1306 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) | |
1307 | return -EINVAL; | |
1308 | else { | |
1309 | if (netif_running(netdev)) { | |
1310 | adapter->pool_config = 1; | |
1311 | ibmveth_close(netdev); | |
1312 | adapter->pool_config = 0; | |
1313 | pool->size = value; | |
1314 | if ((rc = ibmveth_open(netdev))) | |
1315 | return rc; | |
1316 | } else | |
1317 | pool->size = value; | |
1318 | } | |
1319 | } else if (attr == &veth_size_attr) { | |
1320 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) | |
1321 | return -EINVAL; | |
1322 | else { | |
1323 | if (netif_running(netdev)) { | |
1324 | adapter->pool_config = 1; | |
1325 | ibmveth_close(netdev); | |
1326 | adapter->pool_config = 0; | |
1327 | pool->buff_size = value; | |
1328 | if ((rc = ibmveth_open(netdev))) | |
1329 | return rc; | |
1330 | } else | |
1331 | pool->buff_size = value; | |
1332 | } | |
1333 | } | |
1334 | ||
1335 | /* kick the interrupt handler to allocate/deallocate pools */ | |
1336 | ibmveth_interrupt(netdev->irq, netdev); | |
1337 | return count; | |
1338 | } | |
1339 | ||
1340 | ||
1341 | #define ATTR(_name, _mode) \ | |
1342 | struct attribute veth_##_name##_attr = { \ | |
1343 | .name = __stringify(_name), .mode = _mode, \ | |
1344 | }; | |
1345 | ||
1346 | static ATTR(active, 0644); | |
1347 | static ATTR(num, 0644); | |
1348 | static ATTR(size, 0644); | |
1349 | ||
1350 | static struct attribute * veth_pool_attrs[] = { | |
1351 | &veth_active_attr, | |
1352 | &veth_num_attr, | |
1353 | &veth_size_attr, | |
1354 | NULL, | |
1355 | }; | |
1356 | ||
1357 | static struct sysfs_ops veth_pool_ops = { | |
1358 | .show = veth_pool_show, | |
1359 | .store = veth_pool_store, | |
1360 | }; | |
1361 | ||
1362 | static struct kobj_type ktype_veth_pool = { | |
1363 | .release = NULL, | |
1364 | .sysfs_ops = &veth_pool_ops, | |
1365 | .default_attrs = veth_pool_attrs, | |
1366 | }; | |
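 | /* Illustrative usage of the pool attributes exposed above (the device | |
 |  * path and unit address are examples only): | |
 |  * echo 0 > /sys/devices/vio/30000002/pool3/active - deactivate pool 3 | |
 |  * echo 512 > /sys/devices/vio/30000002/pool2/num - resize pool 2 to 512 buffers | |
 |  * echo 2048 > /sys/devices/vio/30000002/pool2/size - use 2048-byte buffers in pool 2 | |
 |  * A change on a running device is applied by veth_pool_store() closing | |
 |  * and reopening the device. | |
 |  */ | |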
1367 | ||
1368 | ||
1369 | static struct vio_device_id ibmveth_device_table[] __devinitdata = { | |
1370 | { "network", "IBM,l-lan"}, | |
1371 | { "", "" } | |
1372 | }; | |
1373 | MODULE_DEVICE_TABLE(vio, ibmveth_device_table); | |
1374 | ||
1375 | static struct vio_driver ibmveth_driver = { | |
1376 | .id_table = ibmveth_device_table, | |
1377 | .probe = ibmveth_probe, | |
1378 | .remove = ibmveth_remove, | |
1379 | .driver = { | |
1380 | .name = ibmveth_driver_name, | |
1381 | .owner = THIS_MODULE, | |
1382 | } | |
1383 | }; | |
1384 | ||
1385 | static int __init ibmveth_module_init(void) | |
1386 | { | |
1387 | ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version); | |
1388 | ||
1389 | ibmveth_proc_register_driver(); | |
1390 | ||
1391 | return vio_register_driver(&ibmveth_driver); | |
1392 | } | |
1393 | ||
1394 | static void __exit ibmveth_module_exit(void) | |
1395 | { | |
1396 | vio_unregister_driver(&ibmveth_driver); | |
1397 | ibmveth_proc_unregister_driver(); | |
1398 | } | |
1399 | ||
1400 | module_init(ibmveth_module_init); | |
1401 | module_exit(ibmveth_module_exit); |