/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

/* This driver lives in a spar partition, and registers to ethernet io
 * channels from the visorbus driver. It creates netdev devices and
 * forwards transmit to the IO channel and accepts rcvs from the IO
 * Partition via the IO channel.
 */

#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include "visorbus.h"
#include "iochannel.h"

#define VISORNIC_INFINITE_RSP_WAIT 0
#define VISORNICSOPENMAX 32
#define MAXDEVICES 16384

/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
 *	   = 163840 bytes
 */
#define MAX_BUF 163840

static spinlock_t dev_num_pool_lock;
static void *dev_num_pool;	/**< pool to grab device numbers from */

static int visornic_probe(struct visor_device *dev);
static void visornic_remove(struct visor_device *dev);
static int visornic_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func);
static int visornic_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func);

/* DEBUGFS declarations */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset);
static ssize_t enable_ints_write(struct file *file, const char __user *buf,
				 size_t len, loff_t *ppos);
static struct dentry *visornic_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
};

static const struct file_operations debugfs_enable_ints_fops = {
	.write = enable_ints_write,
};

static struct workqueue_struct *visornic_timeout_reset_workqueue;

/* GUIDS for director channel type supported by this driver. */
static struct visor_channeltype_descriptor visornic_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VNIC channel.
	 */
	{ SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
	{ NULL_UUID_LE, NULL }
};

/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visornic_driver = {
	.name = "visornic",
	.version = "1.0.0.0",
	.vertag = NULL,
	.owner = THIS_MODULE,
	.channel_types = visornic_channel_types,
	.probe = visornic_probe,
	.remove = visornic_remove,
	.pause = visornic_pause,
	.resume = visornic_resume,
	.channel_interrupt = NULL,
};

struct chanstat {
	unsigned long got_rcv;
	unsigned long got_enbdisack;
	unsigned long got_xmit_done;
	unsigned long xmit_fail;
	unsigned long sent_enbdis;
	unsigned long sent_promisc;
	unsigned long sent_post;
	unsigned long sent_post_failed;
	unsigned long sent_xmit;
	unsigned long reject_count;
	unsigned long extra_rcvbufs_sent;
};
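
/* Note: these per-channel counters are statistics only; every field in
 * struct chanstat is dumped by the debugfs info handler,
 * info_debugfs_read(), below.
 */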

struct visornic_devdata {
	int devnum;
	unsigned short enabled;		/* 0 disabled 1 enabled to receive */
	unsigned short enab_dis_acked;	/* NET_RCV_ENABLE/DISABLE acked by
					 * IOPART
					 */
	struct visor_device *dev;
	char name[99];
	struct list_head list_all;	/**< link within list_all_devices list */
	struct net_device *netdev;
	struct net_device_stats net_stats;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct sk_buff **rcvbuf;
	u64 uniquenum;			/* TODO figure out why not used */
	unsigned short old_flags;	/* flags as they were prior to
					 * set_multicast_list
					 */
	atomic_t usage;			/* count of users */
	int num_rcv_bufs;		/* indicates how many rcv buffers
					 * the vnic will post
					 */
	int num_rcv_bufs_could_not_alloc;
	atomic_t num_rcvbuf_in_iovm;
	unsigned long alloc_failed_in_if_needed_cnt;
	unsigned long alloc_failed_in_repost_rtn_cnt;
	unsigned long max_outstanding_net_xmits;  /* absolute max number of
						   * outstanding xmits - should
						   * never hit this
						   */
	unsigned long upper_threshold_net_xmits;  /* high water mark for
						   * calling netif_stop_queue()
						   */
	unsigned long lower_threshold_net_xmits;  /* low water mark for calling
						   * netif_wake_queue()
						   */
	struct sk_buff_head xmitbufhead;	/* xmitbufhead is the head of the
						 * xmit buffer list that have been
						 * sent to the IOPART end
						 */
	visorbus_state_complete_func server_down_complete_func;
	struct work_struct timeout_reset;
	struct uiscmdrsp *cmdrsp_rcv;	/* cmdrsp_rcv is used for
					 * posting/unposting rcv buffers
					 */
	struct uiscmdrsp *xmit_cmdrsp;	/* used to issue NET_XMIT - there is
					 * never more than one xmit in
					 * progress at a time
					 */
	bool server_down;		/* IOPART is down */
	bool server_change_state;	/* Processing SERVER_CHANGESTATE msg */
	bool going_away;		/* device is being torn down */
	struct dentry *eth_debugfs_dir;
	u64 interrupts_rcvd;
	u64 interrupts_notme;
	u64 interrupts_disabled;
	u64 busy_cnt;
	spinlock_t priv_lock;		/* spinlock to access devdata structures */

	/* flow control counter */
	u64 flow_control_upper_hits;
	u64 flow_control_lower_hits;

	/* debug counters */
	unsigned long n_rcv0;		/* # rcvs of 0 buffers */
	unsigned long n_rcv1;		/* # rcvs of 1 buffers */
	unsigned long n_rcv2;		/* # rcvs of 2 buffers */
	unsigned long n_rcvx;		/* # rcvs of >2 buffers */
	unsigned long found_repost_rcvbuf_cnt;	/* # times we called
						 * repost_rcvbuf_cnt
						 */
	unsigned long repost_found_skb_cnt;	/* # times found the skb */
	unsigned long n_repost_deficit;		/* # times we couldn't find
						 * all of the rcv buffers
						 */
	unsigned long bad_rcv_buf;		/* # times we neglected to
						 * free the rcv skb because
						 * we didn't know where it
						 * came from
						 */
	unsigned long n_rcv_packets_not_accepted; /* # bogus rcv packets */

	int queuefullmsg_logged;
	struct chanstat chstat;
	struct timer_list irq_poll_timer;
	struct napi_struct napi;
	struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};
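
/* A visornic_devdata is embedded in the private area of each visornic
 * net_device (allocated via alloc_etherdev() in visornic_probe() and
 * retrieved everywhere with netdev_priv()). Fields shared between the
 * xmit path, response-queue servicing, and device-state changes are
 * protected by priv_lock.
 */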

/* List of all visornic_devdata structs,
 * linked via the list_all member
 */
static LIST_HEAD(list_all_devices);
static DEFINE_SPINLOCK(lock_all_devices);
static int visornic_poll(struct napi_struct *napi, int budget);
static void poll_for_irq(unsigned long v);

/**
 * visor_copy_fragsinfo_from_skb - build a phys_info array from an skb
 * @skb: skbuff that we are pulling the frags from
 * @firstfraglen: length of first fragment in skb
 * @frags_max: max len of frags array
 * @frags: frags array filled in on output
 *
 * Copy the fragment list in the SKB to a phys_info
 * array that the IOPART understands.
 * Return value indicates number of entries filled in frags
 * Negative values indicate an error.
 */
static int
visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
			      unsigned int frags_max,
			      struct phys_info frags[])
{
	unsigned int count = 0, ii, size, offset = 0, numfrags;
	unsigned int total_count;

	numfrags = skb_shinfo(skb)->nr_frags;

	/* Compute the number of fragments this skb has, and if it's more
	 * than the frag array can hold, linearize the skb
	 */
	total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
	if (firstfraglen % PI_PAGE_SIZE)
		total_count++;

	if (total_count > frags_max) {
		if (skb_linearize(skb))
			return -EINVAL;
		numfrags = skb_shinfo(skb)->nr_frags;
		firstfraglen = 0;
	}

	while (firstfraglen) {
		if (count == frags_max)
			return -EINVAL;

		frags[count].pi_pfn =
			page_to_pfn(virt_to_page(skb->data + offset));
		frags[count].pi_off =
			(unsigned long)(skb->data + offset) & PI_PAGE_MASK;
		size = min_t(unsigned int, firstfraglen,
			     PI_PAGE_SIZE - frags[count].pi_off);

		/* can take smallest of firstfraglen (what's left) OR
		 * bytes left in the page
		 */
		frags[count].pi_len = size;
		firstfraglen -= size;
		offset += size;
		count++;
	}
	if (numfrags) {
		if ((count + numfrags) > frags_max)
			return -EINVAL;

		for (ii = 0; ii < numfrags; ii++) {
			count = add_physinfo_entries(page_to_pfn(
				  skb_frag_page(&skb_shinfo(skb)->frags[ii])),
				  skb_shinfo(skb)->frags[ii].page_offset,
				  skb_shinfo(skb)->frags[ii].size,
				  count, frags_max, frags);
			/* add_physinfo_entries only returns
			 * zero if the frags array is out of room
			 * That should never happen because we
			 * fail above, if count+numfrags > frags_max.
			 * Given that there's no recovery mechanism from
			 * putting half a packet in the I/O channel, panic
			 * here as this should never happen
			 */
			BUG_ON(!count);
		}
	}
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *skbinlist;
		int c;

		for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
		     skbinlist = skbinlist->next) {
			c = visor_copy_fragsinfo_from_skb(skbinlist,
							  skbinlist->len -
							  skbinlist->data_len,
							  frags_max - count,
							  &frags[count]);
			if (c < 0)
				return c;
			count += c;
		}
	}
	return count;
}
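
/* Example (assuming PI_PAGE_SIZE is 4 KiB): a linear-only skb whose data
 * starts 512 bytes into a page and is 6000 bytes long yields two entries
 * from the while loop above - 3584 bytes up to the end of the first page,
 * then the remaining 2416 bytes at offset 0 of the next page. Paged
 * fragments and any frag_list skbs are appended after those entries.
 */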

static ssize_t enable_ints_write(struct file *file,
				 const char __user *buffer,
				 size_t count, loff_t *ppos)
{
	/* Don't want to break the ABI here by having a debugfs
	 * file that no longer exists or is no longer writable, so
	 * let's just make this a vestigial function
	 */
	return count;
}

/**
 * visornic_serverdown_complete - IOPART went down, need to pause device
 * @devdata: visornic device that is being managed
 *
 * The IO partition has gone down and we need to do some cleanup
 * for when it comes back. Treat the IO partition as the link
 * being down.
 * Returns void.
 */
static void
visornic_serverdown_complete(struct visornic_devdata *devdata)
{
	struct net_device *netdev;

	netdev = devdata->netdev;

	/* Stop polling for interrupts */
	del_timer_sync(&devdata->irq_poll_timer);

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();

	atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
	devdata->chstat.sent_xmit = 0;
	devdata->chstat.got_xmit_done = 0;

	if (devdata->server_down_complete_func)
		(*devdata->server_down_complete_func)(devdata->dev, 0);

	devdata->server_down = true;
	devdata->server_change_state = false;
	devdata->server_down_complete_func = NULL;
}

/**
 * visornic_serverdown - Command has notified us that IOPART is down
 * @devdata: device that is being managed by IOPART
 *
 * Schedule the work needed to handle the server down request. Make
 * sure we haven't already handled the server change state event.
 * Returns 0 if we scheduled the work, negative on error.
 */
static int
visornic_serverdown(struct visornic_devdata *devdata,
		    visorbus_state_complete_func complete_func)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (!devdata->server_down && !devdata->server_change_state) {
		if (devdata->going_away) {
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			dev_dbg(&devdata->dev->device,
				"%s aborting because device removal pending\n",
				__func__);
			return -ENODEV;
		}
		devdata->server_change_state = true;
		devdata->server_down_complete_func = complete_func;
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		visornic_serverdown_complete(devdata);
	} else if (devdata->server_change_state) {
		dev_dbg(&devdata->dev->device, "%s changing state\n",
			__func__);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		return -EINVAL;
	} else {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
	}
	return 0;
}

/**
 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
 * @netdev: network adapter the rcv bufs are attached to.
 *
 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
 * so that it can write rcv data into our memory space.
 * Return pointer to sk_buff
 */
static struct sk_buff *
alloc_rcv_buf(struct net_device *netdev)
{
	struct sk_buff *skb;

	/* NOTE: the first fragment in each rcv buffer is pointed to by
	 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
	 * in length, so the firstfrag is large enough to hold 1514.
	 */
	skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb->dev = netdev;
	skb->len = RCVPOST_BUF_SIZE;
	/* current value of mtu doesn't come into play here; large
	 * packets will just end up using multiple rcv buffers all of
	 * same size
	 */
	skb->data_len = 0;	/* dev_alloc_skb already zeroes it out
				 * for clarification.
				 */
	return skb;
}

/**
 * post_skb - post a skb to the IO Partition.
 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
 * @devdata: visornic_devdata to post the skb to
 * @skb: skb to give to the IO partition
 *
 * Send the skb to the IO Partition.
 * Returns void
 */
static inline void
post_skb(struct uiscmdrsp *cmdrsp,
	 struct visornic_devdata *devdata, struct sk_buff *skb)
{
	cmdrsp->net.buf = skb;
	cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
	cmdrsp->net.rcvpost.frag.pi_off =
		(unsigned long)skb->data & PI_PAGE_MASK;
	cmdrsp->net.rcvpost.frag.pi_len = skb->len;
	cmdrsp->net.rcvpost.unique_num = devdata->uniquenum;

	if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
		cmdrsp->net.type = NET_RCV_POST;
		cmdrsp->cmdtype = CMD_NET_TYPE;
		if (visorchannel_signalinsert(devdata->dev->visorchannel,
					      IOCHAN_TO_IOPART,
					      cmdrsp)) {
			atomic_inc(&devdata->num_rcvbuf_in_iovm);
			devdata->chstat.sent_post++;
		} else {
			devdata->chstat.sent_post_failed++;
		}
	}
}
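
/* Note: a receive buffer is only posted when its data area fits entirely
 * within a single page (pi_off + len <= PI_PAGE_SIZE); otherwise post_skb()
 * returns without posting and without bumping either sent_post counter.
 */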

/**
 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
 * @netdev: netdevice we are enabling/disabling, used as context
 *	    return value
 * @state: enable = 1/disable = 0
 * @devdata: visornic device we are enabling/disabling
 *
 * Send the enable/disable message to the IO Partition.
 * Returns void
 */
static void
send_enbdis(struct net_device *netdev, int state,
	    struct visornic_devdata *devdata)
{
	devdata->cmdrsp_rcv->net.enbdis.enable = state;
	devdata->cmdrsp_rcv->net.enbdis.context = netdev;
	devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
	devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      devdata->cmdrsp_rcv))
		devdata->chstat.sent_enbdis++;
}

/**
 * visornic_disable_with_timeout - Disable network adapter
 * @netdev: netdevice to disable
 * @timeout: timeout to wait for disable
 *
 * Disable the network adapter and inform the IO Partition that we
 * are disabled, reclaim memory from rcv bufs.
 * Returns 0 on success, negative for failure of IO Partition
 * responding.
 */
static int
visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
{
	struct visornic_devdata *devdata = netdev_priv(netdev);
	int i;
	unsigned long flags;
	int wait = 0;

	/* send a msg telling the other end we are stopping incoming pkts */
	spin_lock_irqsave(&devdata->priv_lock, flags);
	devdata->enabled = 0;
	devdata->enab_dis_acked = 0; /* must wait for ack */
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* send disable and wait for ack -- don't hold lock when sending
	 * disable because if the queue is full, insert might sleep.
	 */
	send_enbdis(netdev, 0, devdata);

	/* wait for ack to arrive before we try to free rcv buffers
	 * NOTE: the other end automatically unposts the rcv buffers
	 * when it gets a disable.
	 */
	spin_lock_irqsave(&devdata->priv_lock, flags);
	while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
	       (wait < timeout)) {
		if (devdata->enab_dis_acked)
			break;
		if (devdata->server_down || devdata->server_change_state) {
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			dev_dbg(&netdev->dev, "%s server went away\n",
				__func__);
			return -EIO;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		wait += schedule_timeout(msecs_to_jiffies(10));
		spin_lock_irqsave(&devdata->priv_lock, flags);
	}

	/* Wait for usage to go to 1 (no other users) before freeing
	 * rcv buffers
	 */
	if (atomic_read(&devdata->usage) > 1) {
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			schedule_timeout(msecs_to_jiffies(10));
			spin_lock_irqsave(&devdata->priv_lock, flags);
			if (atomic_read(&devdata->usage))
				break;
		}
	}
	/* we've set enabled to 0, so we can give up the lock. */
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* stop the transmit queue so nothing more can be transmitted */
	netif_stop_queue(netdev);

	napi_disable(&devdata->napi);

	skb_queue_purge(&devdata->xmitbufhead);

	/* Free rcv buffers - other end has automatically unposted them on
	 * disable
	 */
	for (i = 0; i < devdata->num_rcv_bufs; i++) {
		if (devdata->rcvbuf[i]) {
			kfree_skb(devdata->rcvbuf[i]);
			devdata->rcvbuf[i] = NULL;
		}
	}

	return 0;
}

/**
 * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
 * @netdev: struct netdevice
 * @devdata: visornic_devdata
 *
 * Allocate rcv buffers and post them to the IO Partition.
 * Return 0 for success, and negative for failure.
 */
static int
init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
{
	int i, count;

	/* allocate fixed number of receive buffers to post to uisnic
	 * post receive buffers after we've allocated a required amount
	 */
	for (i = 0; i < devdata->num_rcv_bufs; i++) {
		devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
		if (!devdata->rcvbuf[i])
			break; /* if we failed to allocate one let us stop */
	}
	if (i == 0) /* couldn't even allocate one -- bail out */
		return -ENOMEM;
	count = i;

	/* Ensure we can alloc 2/3rd of the requested number of buffers.
	 * 2/3 is an arbitrary choice; used also in ndis init.c
	 */
	if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
		/* free receive buffers we did alloc and then bail out */
		for (i = 0; i < count; i++) {
			kfree_skb(devdata->rcvbuf[i]);
			devdata->rcvbuf[i] = NULL;
		}
		return -ENOMEM;
	}

	/* post receive buffers to receive incoming input - without holding
	 * lock - we've not enabled nor started the queue so there shouldn't
	 * be any rcv or xmit activity
	 */
	for (i = 0; i < count; i++)
		post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);

	return 0;
}
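
/* Example: with num_rcv_bufs = 60, at least 40 skbs (2 * 60 / 3) must be
 * allocated; if only 35 succeed, everything allocated so far is freed and
 * -ENOMEM is returned.
 */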

/**
 * visornic_enable_with_timeout - send enable to IO Part
 * @netdev: struct net_device
 * @timeout: Time to wait for the ACK from the enable
 *
 * Sends enable to IOVM, inits, and posts receive buffers to IOVM
 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
 * Return 0 for success, negative for failure.
 */
static int
visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
{
	int i;
	struct visornic_devdata *devdata = netdev_priv(netdev);
	unsigned long flags;
	int wait = 0;

	/* NOTE: the other end automatically unposts the rcv buffers when it
	 * gets a disable.
	 */
	i = init_rcv_bufs(netdev, devdata);
	if (i < 0) {
		dev_err(&netdev->dev,
			"%s failed to init rcv bufs (%d)\n", __func__, i);
		return i;
	}

	spin_lock_irqsave(&devdata->priv_lock, flags);
	devdata->enabled = 1;
	devdata->enab_dis_acked = 0;

	/* now we're ready, let's send an ENB to uisnic but until we get
	 * an ACK back from uisnic, we'll drop the packets
	 */
	devdata->n_rcv_packets_not_accepted = 0;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* send enable and wait for ack -- don't hold lock when sending enable
	 * because if the queue is full, insert might sleep.
	 */
	napi_enable(&devdata->napi);
	send_enbdis(netdev, 1, devdata);

	spin_lock_irqsave(&devdata->priv_lock, flags);
	while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
	       (wait < timeout)) {
		if (devdata->enab_dis_acked)
			break;
		if (devdata->server_down || devdata->server_change_state) {
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			dev_dbg(&netdev->dev, "%s server went away\n",
				__func__);
			return -EIO;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		wait += schedule_timeout(msecs_to_jiffies(10));
		spin_lock_irqsave(&devdata->priv_lock, flags);
	}

	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	if (!devdata->enab_dis_acked) {
		dev_err(&netdev->dev, "%s missing ACK\n", __func__);
		return -EIO;
	}

	netif_start_queue(netdev);

	return 0;
}

/**
 * visornic_timeout_reset - handle xmit timeout resets
 * @work: work item that scheduled the work
 *
 * Transmit timeouts are typically handled by resetting the
 * device. For our virtual NIC we will send a Disable and Enable
 * to the IOVM. If it doesn't respond we will trigger a serverdown.
 */
static void
visornic_timeout_reset(struct work_struct *work)
{
	struct visornic_devdata *devdata;
	struct net_device *netdev;
	int response = 0;

	devdata = container_of(work, struct visornic_devdata, timeout_reset);
	netdev = devdata->netdev;

	rtnl_lock();
	if (!netif_running(netdev)) {
		rtnl_unlock();
		return;
	}

	response = visornic_disable_with_timeout(netdev,
						 VISORNIC_INFINITE_RSP_WAIT);
	if (response)
		goto call_serverdown;

	response = visornic_enable_with_timeout(netdev,
						VISORNIC_INFINITE_RSP_WAIT);
	if (response)
		goto call_serverdown;

	rtnl_unlock();

	return;

call_serverdown:
	visornic_serverdown(devdata, NULL);
	rtnl_unlock();
}

/**
 * visornic_open - Enable the visornic device and mark the queue started
 * @netdev: netdevice to start
 *
 * Enable the device and start the transmit queue.
 * Return 0 for success
 */
static int
visornic_open(struct net_device *netdev)
{
	visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);

	return 0;
}

/**
 * visornic_close - Disables the visornic device and stops the queues
 * @netdev: netdevice to stop
 *
 * Disable the device and stop the transmit queue.
 * Return 0 for success
 */
static int
visornic_close(struct net_device *netdev)
{
	visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);

	return 0;
}

/**
 * devdata_xmits_outstanding - compute outstanding xmits
 * @devdata: visornic_devdata for device
 *
 * Return value is the number of outstanding xmits.
 */
static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
{
	if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
		return devdata->chstat.sent_xmit -
			devdata->chstat.got_xmit_done;
	else
		return (ULONG_MAX - devdata->chstat.got_xmit_done
			+ devdata->chstat.sent_xmit + 1);
}
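
/* Example of the wraparound case: if sent_xmit has wrapped to 2 while
 * got_xmit_done is still ULONG_MAX - 1, the outstanding count is
 * ULONG_MAX - (ULONG_MAX - 1) + 2 + 1 = 4.
 */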

/**
 * vnic_hit_high_watermark
 * @devdata: indicates visornic device we are checking
 * @high_watermark: max num of unacked xmits we will tolerate,
 *		    before we will start throttling
 *
 * Returns true iff the number of unacked xmits sent to
 * the IO partition is >= high_watermark.
 */
static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
					   ulong high_watermark)
{
	return (devdata_xmits_outstanding(devdata) >= high_watermark);
}

/**
 * vnic_hit_low_watermark
 * @devdata: indicates visornic device we are checking
 * @low_watermark: we will wait until the num of unacked xmits
 *		   drops to this value or lower before we start
 *		   transmitting again
 *
 * Returns true iff the number of unacked xmits sent to
 * the IO partition is <= low_watermark.
 */
static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
					  ulong low_watermark)
{
	return (devdata_xmits_outstanding(devdata) <= low_watermark);
}
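
/* These two helpers implement the driver's flow control: visornic_xmit()
 * stops the netif queue once outstanding xmits reach
 * upper_threshold_net_xmits, and service_resp_queue() wakes it again once
 * completions bring the count down to lower_threshold_net_xmits.
 */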

/**
 * visornic_xmit - send a packet to the IO Partition
 * @skb: Packet to be sent
 * @netdev: net device the packet is being sent from
 *
 * Convert the skb to a cmdrsp so the IO Partition can understand it.
 * Send the XMIT command to the IO Partition for processing. This
 * function is protected from concurrent calls by a spinlock xmit_lock
 * in the net_device struct, but as soon as the function returns it
 * can be called again.
 * Returns NETDEV_TX_OK.
 */
static int
visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct visornic_devdata *devdata;
	int len, firstfraglen, padlen;
	struct uiscmdrsp *cmdrsp = NULL;
	unsigned long flags;

	devdata = netdev_priv(netdev);
	spin_lock_irqsave(&devdata->priv_lock, flags);

	if (netif_queue_stopped(netdev) || devdata->server_down ||
	    devdata->server_change_state) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		devdata->busy_cnt++;
		dev_dbg(&netdev->dev,
			"%s busy - queue stopped\n", __func__);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* sk_buff struct is used to host network data throughout all the
	 * linux network subsystems
	 */
	len = skb->len;

	/* skb->len is the FULL length of data (including fragmentary portion)
	 * skb->data_len is the length of the fragment portion in frags
	 * skb->len - skb->data_len is size of the 1st fragment in skb->data
	 * calculate the length of the first fragment that skb->data is
	 * pointing to
	 */
	firstfraglen = skb->len - skb->data_len;
	if (firstfraglen < ETH_HEADER_SIZE) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		devdata->busy_cnt++;
		dev_err(&netdev->dev,
			"%s busy - first frag too small (%d)\n",
			__func__, firstfraglen);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if ((len < ETH_MIN_PACKET_SIZE) &&
	    ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
		/* pad the packet out to minimum size */
		padlen = ETH_MIN_PACKET_SIZE - len;
		memset(&skb->data[len], 0, padlen);
		skb->tail += padlen;
		skb->len += padlen;
		len += padlen;
		firstfraglen += padlen;
	}

	cmdrsp = devdata->xmit_cmdrsp;
	/* clear cmdrsp */
	memset(cmdrsp, 0, SIZEOF_CMDRSP);
	cmdrsp->net.type = NET_XMIT;
	cmdrsp->cmdtype = CMD_NET_TYPE;

	/* save the pointer to skb -- we'll need it for completion */
	cmdrsp->net.buf = skb;

	if (vnic_hit_high_watermark(devdata,
				    devdata->max_outstanding_net_xmits)) {
		/* too many NET_XMITs queued over to IOVM - need to wait */
		devdata->chstat.reject_count++;
		if (!devdata->queuefullmsg_logged &&
		    ((devdata->chstat.reject_count & 0x3ff) == 1))
			devdata->queuefullmsg_logged = 1;
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		devdata->busy_cnt++;
		dev_dbg(&netdev->dev,
			"%s busy - waiting for iovm to catch up\n",
			__func__);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	if (devdata->queuefullmsg_logged)
		devdata->queuefullmsg_logged = 0;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cmdrsp->net.xmt.lincsum.valid = 1;
		cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
		if (skb_transport_header(skb) > skb->data) {
			cmdrsp->net.xmt.lincsum.hrawoff =
				skb_transport_header(skb) - skb->data;
			cmdrsp->net.xmt.lincsum.hrawoffv = 1;
		}
		if (skb_network_header(skb) > skb->data) {
			cmdrsp->net.xmt.lincsum.nhrawoff =
				skb_network_header(skb) - skb->data;
			cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
		}
		cmdrsp->net.xmt.lincsum.csum = skb->csum;
	} else {
		cmdrsp->net.xmt.lincsum.valid = 0;
	}

	/* save off the length of the entire data packet */
	cmdrsp->net.xmt.len = len;

	/* copy ethernet header from first frag into cmdrsp
	 * - everything else will be passed in frags & DMA'ed
	 */
	memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
	/* copy frags info - from skb->data we need to only provide access
	 * beyond eth header
	 */
	cmdrsp->net.xmt.num_frags =
		visor_copy_fragsinfo_from_skb(skb, firstfraglen,
					      MAX_PHYS_INFO,
					      cmdrsp->net.xmt.frags);
	if (cmdrsp->net.xmt.num_frags < 0) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		devdata->busy_cnt++;
		dev_err(&netdev->dev,
			"%s busy - copy frags failed\n", __func__);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART, cmdrsp)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		devdata->busy_cnt++;
		dev_dbg(&netdev->dev,
			"%s busy - signalinsert failed\n", __func__);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Track the skbs that have been sent to the IOVM for XMIT */
	skb_queue_head(&devdata->xmitbufhead, skb);

	/* update xmt stats */
	devdata->net_stats.tx_packets++;
	devdata->net_stats.tx_bytes += skb->len;
	devdata->chstat.sent_xmit++;

	/* check to see if we have hit the high watermark for
	 * netif_stop_queue()
	 */
	if (vnic_hit_high_watermark(devdata,
				    devdata->upper_threshold_net_xmits)) {
		/* too many NET_XMITs queued over to IOVM - need to wait */
		netif_stop_queue(netdev); /* calling stop queue - call
					   * netif_wake_queue() after lower
					   * threshold
					   */
		dev_dbg(&netdev->dev,
			"%s busy - invoking iovm flow control\n",
			__func__);
		devdata->flow_control_upper_hits++;
	}
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* skb will be freed when we get back NET_XMIT_DONE */
	return NETDEV_TX_OK;
}
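
/* Life cycle of a transmitted skb: visornic_xmit() queues it on
 * xmitbufhead and bumps chstat.sent_xmit; when the IO Partition returns
 * a NET_XMIT_DONE response, service_resp_queue() unlinks the skb from
 * xmitbufhead, frees it, and bumps chstat.got_xmit_done.
 */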

/**
 * visornic_get_stats - returns net_stats of the visornic device
 * @netdev: netdevice
 *
 * Returns the net_device_stats for the device
 */
static struct net_device_stats *
visornic_get_stats(struct net_device *netdev)
{
	struct visornic_devdata *devdata = netdev_priv(netdev);

	return &devdata->net_stats;
}

/**
 * visornic_change_mtu - changes mtu of device.
 * @netdev: netdevice
 * @new_mtu: value of new mtu
 *
 * MTU cannot be changed by system, must be changed via
 * CONTROLVM message. All vnics and pnics in a switch have
 * to have the same MTU for everything to work.
 * Currently not supported.
 * Returns -EINVAL
 */
static int
visornic_change_mtu(struct net_device *netdev, int new_mtu)
{
	return -EINVAL;
}

/**
 * visornic_set_multi - changes rx mode of the device
 * @netdev: netdevice
 *
 * Only flag we support currently is IFF_PROMISC
 * Returns void
 */
static void
visornic_set_multi(struct net_device *netdev)
{
	struct uiscmdrsp *cmdrsp;
	struct visornic_devdata *devdata = netdev_priv(netdev);

	/* any filtering changes */
	if (devdata->old_flags != netdev->flags) {
		if ((netdev->flags & IFF_PROMISC) !=
		    (devdata->old_flags & IFF_PROMISC)) {
			cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
			if (!cmdrsp)
				return;
			cmdrsp->cmdtype = CMD_NET_TYPE;
			cmdrsp->net.type = NET_RCV_PROMISC;
			cmdrsp->net.enbdis.context = netdev;
			cmdrsp->net.enbdis.enable =
				(netdev->flags & IFF_PROMISC);
			visorchannel_signalinsert(devdata->dev->visorchannel,
						  IOCHAN_TO_IOPART,
						  cmdrsp);
			kfree(cmdrsp);
		}
		devdata->old_flags = netdev->flags;
	}
}
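
/* Note: only a change in IFF_PROMISC is forwarded to the IO Partition
 * (as a NET_RCV_PROMISC command); multicast list updates are not sent
 * here - multicast filtering is instead done in software in visornic_rx().
 */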

/**
 * visornic_xmit_timeout - request to timeout the xmit
 * @netdev: netdevice
 *
 * Queue the work and return. Make sure we have not already
 * been informed the IO Partition is gone; if it is gone
 * we will already timeout the xmits.
 */
static void
visornic_xmit_timeout(struct net_device *netdev)
{
	struct visornic_devdata *devdata = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->going_away) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_dbg(&devdata->dev->device,
			"%s aborting because device removal pending\n",
			__func__);
		return;
	}

	/* Ensure that a ServerDown message hasn't been received */
	if (!devdata->enabled ||
	    (devdata->server_down && !devdata->server_change_state)) {
		dev_dbg(&netdev->dev, "%s no processing\n",
			__func__);
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		return;
	}
	queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/**
 * repost_return - repost rcv bufs that have come back
 * @cmdrsp: io channel command struct to post
 * @devdata: visornic devdata for the device
 * @skb: skb
 * @netdev: netdevice
 *
 * Repost rcv buffers that have been returned to us when
 * we are finished with them.
 * Returns 0 for success, negative on error.
 */
static inline int
repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
	      struct sk_buff *skb, struct net_device *netdev)
{
	struct net_pkt_rcv copy;
	int i = 0, cc, numreposted;
	int found_skb = 0;
	int status = 0;

	copy = cmdrsp->net.rcv;
	switch (copy.numrcvbufs) {
	case 0:
		devdata->n_rcv0++;
		break;
	case 1:
		devdata->n_rcv1++;
		break;
	case 2:
		devdata->n_rcv2++;
		break;
	default:
		devdata->n_rcvx++;
		break;
	}
	for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
		for (i = 0; i < devdata->num_rcv_bufs; i++) {
			if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
				continue;

			if ((skb) && devdata->rcvbuf[i] == skb) {
				devdata->found_repost_rcvbuf_cnt++;
				found_skb = 1;
				devdata->repost_found_skb_cnt++;
			}
			devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
			if (!devdata->rcvbuf[i]) {
				devdata->num_rcv_bufs_could_not_alloc++;
				devdata->alloc_failed_in_repost_rtn_cnt++;
				status = -ENOMEM;
				break;
			}
			post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
			numreposted++;
			break;
		}
	}
	if (numreposted != copy.numrcvbufs) {
		devdata->n_repost_deficit++;
		status = -EINVAL;
	}
	if (skb) {
		if (found_skb) {
			kfree_skb(skb);
		} else {
			status = -EINVAL;
			devdata->bad_rcv_buf++;
		}
	}
	return status;
}

/**
 * visornic_rx - Handle receive packets coming back from IO Part
 * @cmdrsp: Receive packet returned from IO Part
 *
 * Got a receive packet back from the IO Part, handle it and send
 * it up the stack.
 * Returns 1 if a packet was handed up to the network stack, 0 otherwise.
 */
static int
visornic_rx(struct uiscmdrsp *cmdrsp)
{
	struct visornic_devdata *devdata;
	struct sk_buff *skb, *prev, *curr;
	struct net_device *netdev;
	int cc, currsize, off;
	struct ethhdr *eth;
	unsigned long flags;
	int rx_count = 0;

	/* post new rcv buf to the other end using the cmdrsp we have at hand
	 * post it without holding lock - but we'll use the signal lock to
	 * synchronize the queue insert the cmdrsp that contains the net.rcv
	 * is the one we are using to repost, so copy the info we need from it.
	 */
	skb = cmdrsp->net.buf;
	netdev = skb->dev;

	devdata = netdev_priv(netdev);

	spin_lock_irqsave(&devdata->priv_lock, flags);
	atomic_dec(&devdata->num_rcvbuf_in_iovm);

	/* update rcv stats - call it with priv_lock held */
	devdata->net_stats.rx_packets++;
	devdata->net_stats.rx_bytes += skb->len;

	/* set length to how much was ACTUALLY received -
	 * NOTE: rcv_done_len includes actual length of data rcvd
	 * including ethhdr
	 */
	skb->len = cmdrsp->net.rcv.rcv_done_len;

	/* test enabled while holding lock */
	if (!(devdata->enabled && devdata->enab_dis_acked)) {
		/* don't process it unless we're in enable mode and until
		 * we've gotten an ACK saying the other end got our RCV enable
		 */
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		repost_return(cmdrsp, devdata, skb, netdev);
		return rx_count;
	}

	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* when skb was allocated, skb->dev, skb->data, skb->len and
	 * skb->data_len were setup. AND, data has already put into the
	 * skb (both first frag and in frags pages)
	 * NOTE: firstfragslen is the amount of data in skb->data and that
	 * which is not in nr_frags or frag_list. This is now simply
	 * RCVPOST_BUF_SIZE. bump tail to show how much data is in
	 * firstfrag & set data_len to show rest see if we have to chain
	 * frag_list.
	 */
	if (skb->len > RCVPOST_BUF_SIZE) {	/* do PRECAUTIONARY check */
		if (cmdrsp->net.rcv.numrcvbufs < 2) {
			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
				dev_err(&devdata->netdev->dev,
					"repost_return failed");
			return rx_count;
		}
		/* length rcvd is greater than firstfrag in this skb rcv buf */
		skb->tail += RCVPOST_BUF_SIZE;	/* amount in skb->data */
		skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
							      * will be in
							      * frag_list
							      */
	} else {
		/* data fits in this skb - no chaining - do
		 * PRECAUTIONARY check
		 */
		if (cmdrsp->net.rcv.numrcvbufs != 1) {	/* should be 1 */
			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
				dev_err(&devdata->netdev->dev,
					"repost_return failed");
			return rx_count;
		}
		skb->tail += skb->len;
		skb->data_len = 0;	/* nothing rcvd in frag_list */
	}
	off = skb_tail_pointer(skb) - skb->data;

	/* amount we bumped tail by in the head skb
	 * it is used to calculate the size of each chained skb below
	 * it is also used to index into bufline to continue the copy
	 * (for chansocktwopc)
	 * if necessary chain the rcv skbs together.
	 * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
	 * chain the rest to that one.
	 * - do PRECAUTIONARY check
	 */
	if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
		if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
			dev_err(&devdata->netdev->dev, "repost_return failed");
		return rx_count;
	}

	if (cmdrsp->net.rcv.numrcvbufs > 1) {
		/* chain the various rcv buffers into the skb's frag_list. */
		/* Note: off was initialized above */
		for (cc = 1, prev = NULL;
		     cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
			curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
			curr->next = NULL;
			if (!prev)	/* start of list - set head */
				skb_shinfo(skb)->frag_list = curr;
			else
				prev->next = curr;
			prev = curr;

			/* should we set skb->len and skb->data_len for each
			 * buffer being chained??? can't hurt!
			 */
			currsize = min(skb->len - off,
				       (unsigned int)RCVPOST_BUF_SIZE);
			curr->len = currsize;
			curr->tail += currsize;
			curr->data_len = 0;
			off += currsize;
		}
		/* assert skb->len == off */
		if (skb->len != off) {
			netdev_err(devdata->netdev,
				   "something wrong; skb->len:%d != off:%d\n",
				   skb->len, off);
		}
	}

	/* set up packet's protocol type using ethernet header - this
	 * sets up skb->pkt_type & it also PULLS out the eth header
	 */
	skb->protocol = eth_type_trans(skb, netdev);

	eth = eth_hdr(skb);

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_NONE;

	do {
		if (netdev->flags & IFF_PROMISC)
			break;	/* accept all packets */
		if (skb->pkt_type == PACKET_BROADCAST) {
			if (netdev->flags & IFF_BROADCAST)
				break;	/* accept all broadcast packets */
		} else if (skb->pkt_type == PACKET_MULTICAST) {
			if ((netdev->flags & IFF_MULTICAST) &&
			    (netdev_mc_count(netdev))) {
				struct netdev_hw_addr *ha;
				int found_mc = 0;

				/* only accept multicast packets that we can
				 * find in our multicast address list
				 */
				netdev_for_each_mc_addr(ha, netdev) {
					if (ether_addr_equal(eth->h_dest,
							     ha->addr)) {
						found_mc = 1;
						break;
					}
				}
				/* accept packet, dest matches a multicast
				 * address
				 */
				if (found_mc)
					break;
			}
		} else if (skb->pkt_type == PACKET_HOST) {
			break;	/* accept packet, h_dest must match vnic
				 * mac address
				 */
		} else if (skb->pkt_type == PACKET_OTHERHOST) {
			/* something is not right */
			dev_err(&devdata->netdev->dev,
				"**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
				netdev->name, eth->h_dest, netdev->dev_addr);
		}
		/* drop packet - don't forward it up to OS */
		devdata->n_rcv_packets_not_accepted++;
		repost_return(cmdrsp, devdata, skb, netdev);
		return rx_count;
	} while (0);

	rx_count++;
	netif_receive_skb(skb);
	/* netif_receive_skb returns various values, but in practice most
	 * drivers ignore the return value
	 */

	skb = NULL;
	/* whether the packet got dropped or handled, the skb is freed by
	 * kernel code, so we shouldn't free it. but we should repost a
	 * new rcv buffer.
	 */
	repost_return(cmdrsp, devdata, skb, netdev);
	return rx_count;
}

/**
 * devdata_initialize - Initialize devdata structure
 * @devdata: visornic_devdata structure to initialize
 * @dev: visorbus device it belongs to
 *
 * Setup initial values for the visornic based on channel and default
 * values.
 * Returns a pointer to the devdata if successful, else NULL
 */
static struct visornic_devdata *
devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
{
	int devnum = -1;

	if (!devdata)
		return NULL;
	memset(devdata, '\0', sizeof(struct visornic_devdata));
	spin_lock(&dev_num_pool_lock);
	devnum = find_first_zero_bit(dev_num_pool, MAXDEVICES);
	set_bit(devnum, dev_num_pool);
	spin_unlock(&dev_num_pool_lock);
	if (devnum == MAXDEVICES)
		devnum = -1;
	if (devnum < 0)
		return NULL;
	devdata->devnum = devnum;
	devdata->dev = dev;
	strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
	spin_lock(&lock_all_devices);
	list_add_tail(&devdata->list_all, &list_all_devices);
	spin_unlock(&lock_all_devices);
	return devdata;
}

/**
 * devdata_release - Frees up references in devdata
 * @devdata: struct to clean up
 *
 * Frees up references in devdata.
 * Returns void
 */
static void devdata_release(struct visornic_devdata *devdata)
{
	spin_lock(&dev_num_pool_lock);
	clear_bit(devdata->devnum, dev_num_pool);
	spin_unlock(&dev_num_pool_lock);
	spin_lock(&lock_all_devices);
	list_del(&devdata->list_all);
	spin_unlock(&lock_all_devices);
	kfree(devdata->rcvbuf);
	kfree(devdata->cmdrsp_rcv);
	kfree(devdata->xmit_cmdrsp);
}

static const struct net_device_ops visornic_dev_ops = {
	.ndo_open = visornic_open,
	.ndo_stop = visornic_close,
	.ndo_start_xmit = visornic_xmit,
	.ndo_get_stats = visornic_get_stats,
	.ndo_change_mtu = visornic_change_mtu,
	.ndo_tx_timeout = visornic_xmit_timeout,
	.ndo_set_rx_mode = visornic_set_multi,
};

/* DebugFS code */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	ssize_t bytes_read = 0;
	int str_pos = 0;
	struct visornic_devdata *devdata;
	struct net_device *dev;
	char *vbuf;

	if (len > MAX_BUF)
		len = MAX_BUF;
	vbuf = kzalloc(len, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	/* for each vnic channel
	 * dump out channel specific data
	 */
	rcu_read_lock();
	for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
		/* Only consider netdevs that are visornic, and whose
		 * xmit queue is currently stopped
		 */
		if ((dev->netdev_ops != &visornic_dev_ops) ||
		    (!netif_queue_stopped(dev)))
			continue;

		devdata = netdev_priv(dev);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     "netdev = %s (0x%p), MAC Addr %pM\n",
				     dev->name,
				     dev,
				     dev->dev_addr);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     "VisorNic Dev Info = 0x%p\n", devdata);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " num_rcv_bufs = %d\n",
				     devdata->num_rcv_bufs);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " max_outstanding_net_xmits = %lu\n",
				     devdata->max_outstanding_net_xmits);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " upper_threshold_net_xmits = %lu\n",
				     devdata->upper_threshold_net_xmits);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " lower_threshold_net_xmits = %lu\n",
				     devdata->lower_threshold_net_xmits);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " queuefullmsg_logged = %d\n",
				     devdata->queuefullmsg_logged);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.got_rcv = %lu\n",
				     devdata->chstat.got_rcv);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.got_enbdisack = %lu\n",
				     devdata->chstat.got_enbdisack);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.got_xmit_done = %lu\n",
				     devdata->chstat.got_xmit_done);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.xmit_fail = %lu\n",
				     devdata->chstat.xmit_fail);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.sent_enbdis = %lu\n",
				     devdata->chstat.sent_enbdis);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.sent_promisc = %lu\n",
				     devdata->chstat.sent_promisc);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.sent_post = %lu\n",
				     devdata->chstat.sent_post);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.sent_post_failed = %lu\n",
				     devdata->chstat.sent_post_failed);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.sent_xmit = %lu\n",
				     devdata->chstat.sent_xmit);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.reject_count = %lu\n",
				     devdata->chstat.reject_count);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " chstat.extra_rcvbufs_sent = %lu\n",
				     devdata->chstat.extra_rcvbufs_sent);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_rcv0 = %lu\n", devdata->n_rcv0);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_rcv1 = %lu\n", devdata->n_rcv1);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_rcv2 = %lu\n", devdata->n_rcv2);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_rcvx = %lu\n", devdata->n_rcvx);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " num_rcvbuf_in_iovm = %d\n",
				     atomic_read(&devdata->num_rcvbuf_in_iovm));
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " alloc_failed_in_if_needed_cnt = %lu\n",
				     devdata->alloc_failed_in_if_needed_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " alloc_failed_in_repost_rtn_cnt = %lu\n",
				     devdata->alloc_failed_in_repost_rtn_cnt);
		/* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
		 *		      " inner_loop_limit_reached_cnt = %lu\n",
		 *		      devdata->inner_loop_limit_reached_cnt);
		 */
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " found_repost_rcvbuf_cnt = %lu\n",
				     devdata->found_repost_rcvbuf_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " repost_found_skb_cnt = %lu\n",
				     devdata->repost_found_skb_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_repost_deficit = %lu\n",
				     devdata->n_repost_deficit);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " bad_rcv_buf = %lu\n",
				     devdata->bad_rcv_buf);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " n_rcv_packets_not_accepted = %lu\n",
				     devdata->n_rcv_packets_not_accepted);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " interrupts_rcvd = %llu\n",
				     devdata->interrupts_rcvd);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " interrupts_notme = %llu\n",
				     devdata->interrupts_notme);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " interrupts_disabled = %llu\n",
				     devdata->interrupts_disabled);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " busy_cnt = %llu\n",
				     devdata->busy_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " flow_control_upper_hits = %llu\n",
				     devdata->flow_control_upper_hits);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " flow_control_lower_hits = %llu\n",
				     devdata->flow_control_lower_hits);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " netif_queue = %s\n",
				     netif_queue_stopped(devdata->netdev) ?
				     "stopped" : "running");
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     " xmits_outstanding = %lu\n",
				     devdata_xmits_outstanding(devdata));
	}
	rcu_read_unlock();
	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
	kfree(vbuf);
	return bytes_read;
}

/**
 * send_rcv_posts_if_needed
 * @devdata: visornic device
 *
 * Send receive buffers to the IO Partition.
 * Returns void
 */
static void
send_rcv_posts_if_needed(struct visornic_devdata *devdata)
{
	int i;
	struct net_device *netdev;
	struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
	int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;

	/* don't do this until vnic is marked ready */
	if (!(devdata->enabled && devdata->enab_dis_acked))
		return;

	netdev = devdata->netdev;
	rcv_bufs_allocated = 0;
	/* this code is trying to prevent getting stuck here forever,
	 * but still retry it if you can't allocate them all this time.
	 */
	cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
	while (cur_num_rcv_bufs_to_alloc > 0) {
		cur_num_rcv_bufs_to_alloc--;
		for (i = 0; i < devdata->num_rcv_bufs; i++) {
			if (devdata->rcvbuf[i])
				continue;
			devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
			if (!devdata->rcvbuf[i]) {
				devdata->alloc_failed_in_if_needed_cnt++;
				break;
			}
			rcv_bufs_allocated++;
			post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
			devdata->chstat.extra_rcvbufs_sent++;
		}
	}
	devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
}
1613
1614/**
1615 * draing_queue - drains the response queue
1616 * @cmdrsp: io channel command response message
1617 * @devdata: visornic device to drain
1618 *
1619 * Drain the respones queue of any responses from the IO partition.
1620 * Process the responses as we get them.
1621 * Returns when response queue is empty or when the threadd stops.
1622 */
1623static void
946b2546
NH
1624service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1625 int *rx_work_done)
68905a14
DK
1626{
1627 unsigned long flags;
1628 struct net_device *netdev;
1629
87a9404e
NH
1630 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1631 * moment */
7c03621a
DK
1632 for (;;) {
1633 if (!visorchannel_signalremove(devdata->dev->visorchannel,
1634 IOCHAN_FROM_IOPART,
1635 cmdrsp))
1636 break; /* queue empty */
1637
1638 switch (cmdrsp->net.type) {
1639 case NET_RCV:
1640 devdata->chstat.got_rcv++;
1641 /* process incoming packet */
946b2546 1642 *rx_work_done += visornic_rx(cmdrsp);
7c03621a
DK
1643 break;
1644 case NET_XMIT_DONE:
1645 spin_lock_irqsave(&devdata->priv_lock, flags);
1646 devdata->chstat.got_xmit_done++;
1647 if (cmdrsp->net.xmtdone.xmt_done_result)
1648 devdata->chstat.xmit_fail++;
1649 /* only call queue wake if we stopped it */
1650 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1651 /* ASSERT netdev == vnicinfo->netdev; */
1652 if ((netdev == devdata->netdev) &&
1653 netif_queue_stopped(netdev)) {
1654 /* check to see if we have crossed
1655 * the lower watermark for
1656 * netif_wake_queue()
68905a14 1657 */
36927c18
TS
1658 if (vnic_hit_low_watermark(devdata,
1659 devdata->lower_threshold_net_xmits)) {
7c03621a
DK
1660 /* enough NET_XMITs completed
1661 * so can restart netif queue
1662 */
1663 netif_wake_queue(netdev);
1664 devdata->flow_control_lower_hits++;
1665 }
68905a14 1666 }
7c03621a
DK
1667 skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
1668 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1669 kfree_skb(cmdrsp->net.buf);
68905a14 1670 break;
7c03621a
DK
1671 case NET_RCV_ENBDIS_ACK:
1672 devdata->chstat.got_enbdisack++;
1673 netdev = (struct net_device *)
1674 cmdrsp->net.enbdis.context;
87a9404e 1675 spin_lock_irqsave(&devdata->priv_lock, flags);
7c03621a
DK
1676 devdata->enab_dis_acked = 1;
1677 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1678
7c03621a
DK
1679 if (devdata->server_down &&
1680 devdata->server_change_state) {
1681 /* Inform Linux that the link is up */
1682 devdata->server_down = false;
1683 devdata->server_change_state = false;
1684 netif_wake_queue(netdev);
1685 netif_carrier_on(netdev);
1686 }
1687 break;
1688 case NET_CONNECT_STATUS:
1689 netdev = devdata->netdev;
1690 if (cmdrsp->net.enbdis.enable == 1) {
1691 spin_lock_irqsave(&devdata->priv_lock, flags);
1692 devdata->enabled = cmdrsp->net.enbdis.enable;
1693 spin_unlock_irqrestore(&devdata->priv_lock,
1694 flags);
1695 netif_wake_queue(netdev);
1696 netif_carrier_on(netdev);
1697 } else {
1698 netif_stop_queue(netdev);
1699 netif_carrier_off(netdev);
1700 spin_lock_irqsave(&devdata->priv_lock, flags);
1701 devdata->enabled = cmdrsp->net.enbdis.enable;
1702 spin_unlock_irqrestore(&devdata->priv_lock,
1703 flags);
1704 }
1705 break;
1706 default:
1707 break;
87a9404e 1708 }
7c03621a 1709 /* cmdrsp is now available for reuse */
68905a14
DK
1710 }
1711}
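/*
 * Illustrative sketch (not part of the driver): the drain-and-dispatch
 * pattern used by service_resp_queue(), reduced to counting responses.
 * count_pending_responses() is a hypothetical helper, shown only to make
 * the visorchannel_signalremove() loop structure explicit.
 */
#if 0
static unsigned int count_pending_responses(struct visornic_devdata *devdata,
					    struct uiscmdrsp *cmdrsp)
{
	unsigned int count = 0;

	/* pull responses until the IOPART -> OS queue is empty */
	while (visorchannel_signalremove(devdata->dev->visorchannel,
					 IOCHAN_FROM_IOPART, cmdrsp)) {
		switch (cmdrsp->net.type) {
		case NET_RCV:
		case NET_XMIT_DONE:
		case NET_RCV_ENBDIS_ACK:
		case NET_CONNECT_STATUS:
			count++;
			break;
		default:
			break;
		}
	}
	return count;
}
#endif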
1712
946b2546
NH
1713static int visornic_poll(struct napi_struct *napi, int budget)
1714{
1715 struct visornic_devdata *devdata = container_of(napi,
1716 struct visornic_devdata,
1717 napi);
1718 int rx_count = 0;
1719
1720 send_rcv_posts_if_needed(devdata);
1721 service_resp_queue(devdata->cmdrsp, devdata, &rx_count);
1722
1723 /*
1724 * If there aren't any more packets to receive,
1725 * tell napi we are done polling
1726 */
1727 if (rx_count < budget)
1728 napi_complete(napi);
1729
1730 return rx_count;
1731}
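/*
 * Illustrative sketch (not part of the driver): how the napi pieces used in
 * this file fit together.  The weight of 64 and the napi_schedule() call
 * match visornic_probe() and poll_for_irq() below; the helper name is
 * hypothetical.
 */
#if 0
static void napi_wiring_example(struct visornic_devdata *devdata)
{
	/* register visornic_poll() with a per-poll budget weight of 64 */
	netif_napi_add(devdata->netdev, &devdata->napi, visornic_poll, 64);

	/* the poll timer schedules this instance when responses are pending */
	napi_schedule(&devdata->napi);

	/* visornic_poll() then calls napi_complete() only when it handled
	 * fewer packets than its budget; otherwise it is polled again.
	 */
}
#endif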
1732
68905a14 1733/**
946b2546 1734 * poll_for_irq - Checks the status of the response queue.
68905a14
DK
1735 * @v: the visornic devdata, cast to an unsigned long
1736 *
1737 * Timer callback that periodically checks the response queue and
1738 * schedules the napi poll to drain it if needed.
1739 * Returns void.
1740 */
946b2546
NH
1741static void
1742poll_for_irq(unsigned long v)
68905a14 1743{
946b2546 1744 struct visornic_devdata *devdata = (struct visornic_devdata *)v;
68905a14 1745
946b2546
NH
1746 if (!visorchannel_signalempty(
1747 devdata->dev->visorchannel,
1748 IOCHAN_FROM_IOPART))
1749 napi_schedule(&devdata->napi);
68905a14 1750
946b2546 1751 atomic_set(&devdata->interrupt_rcvd, 0);
68905a14 1752
946b2546 1753 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
68905a14 1754
68905a14
DK
1755}
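/*
 * Illustrative sketch (not part of the driver): the timer-driven polling
 * used in place of a real interrupt.  setup_timer() binds poll_for_irq()
 * to the timer in visornic_probe(), and each callback re-arms itself about
 * 2 ms in the future; mod_timer() takes an absolute expiry in jiffies.
 * The helper name is hypothetical.
 */
#if 0
static void start_poll_timer_example(struct visornic_devdata *devdata)
{
	setup_timer(&devdata->irq_poll_timer, poll_for_irq,
		    (unsigned long)devdata);
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
}
#endif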
1756
1757/**
1758 * visornic_probe - probe function for visornic devices
1759 * @dev: The visor device discovered
1760 *
1761 * Called when visorbus discovers a visornic device on its
1762 * bus. It creates a new visornic ethernet adapter.
1763 * Returns 0 or negative for error.
1764 */
1765static int visornic_probe(struct visor_device *dev)
1766{
1767 struct visornic_devdata *devdata = NULL;
1768 struct net_device *netdev = NULL;
1769 int err;
1770 int channel_offset = 0;
1771 u64 features;
1772
1773 netdev = alloc_etherdev(sizeof(struct visornic_devdata));
00748b0c
TS
1774 if (!netdev) {
1775 dev_err(&dev->device,
1776 "%s alloc_etherdev failed\n", __func__);
68905a14 1777 return -ENOMEM;
00748b0c 1778 }
68905a14
DK
1779
1780 netdev->netdev_ops = &visornic_dev_ops;
1781 netdev->watchdog_timeo = (5 * HZ);
051e9fbb 1782 SET_NETDEV_DEV(netdev, &dev->device);
68905a14
DK
1783
1784 /* Get MAC address from channel and read it into the device. */
1785 netdev->addr_len = ETH_ALEN;
1786 channel_offset = offsetof(struct spar_io_channel_protocol,
1787 vnic.macaddr);
1788 err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
1789 ETH_ALEN);
00748b0c
TS
1790 if (err < 0) {
1791 dev_err(&dev->device,
1792 "%s failed to get mac addr from chan (%d)\n",
1793 __func__, err);
68905a14 1794 goto cleanup_netdev;
00748b0c 1795 }
68905a14
DK
1796
1797 devdata = devdata_initialize(netdev_priv(netdev), dev);
1798 if (!devdata) {
00748b0c
TS
1799 dev_err(&dev->device,
1800 "%s devdata_initialize failed\n", __func__);
68905a14
DK
1801 err = -ENOMEM;
1802 goto cleanup_netdev;
1803 }
1804
1805 devdata->netdev = netdev;
5deeea33 1806 dev_set_drvdata(&dev->device, devdata);
68905a14
DK
1807 init_waitqueue_head(&devdata->rsp_queue);
1808 spin_lock_init(&devdata->priv_lock);
1809 devdata->enabled = 0; /* not yet */
1810 atomic_set(&devdata->usage, 1);
1811
1812 /* Setup rcv bufs */
1813 channel_offset = offsetof(struct spar_io_channel_protocol,
1814 vnic.num_rcv_bufs);
1815 err = visorbus_read_channel(dev, channel_offset,
1816 &devdata->num_rcv_bufs, 4);
00748b0c
TS
1817 if (err) {
1818 dev_err(&dev->device,
1819 "%s failed to get #rcv bufs from chan (%d)\n",
1820 __func__, err);
68905a14 1821 goto cleanup_netdev;
00748b0c 1822 }
68905a14 1823
46dfa3d8 1824 devdata->rcvbuf = kzalloc(sizeof(struct sk_buff *) *
68905a14
DK
1825 devdata->num_rcv_bufs, GFP_KERNEL);
1826 if (!devdata->rcvbuf) {
1827 err = -ENOMEM;
1828 goto cleanup_rcvbuf;
1829 }
1830
1831 /* set the net_xmit outstanding threshold */
1832 /* always leave two slots open, but never allow fewer than 3 */
36927c18 1833 /* note that max_outstanding_net_xmits must be > 0 */
68905a14 1834 devdata->max_outstanding_net_xmits =
36927c18 1835 max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
68905a14 1836 devdata->upper_threshold_net_xmits =
36927c18
TS
1837 max_t(unsigned long,
1838 2, (devdata->max_outstanding_net_xmits - 1));
68905a14 1839 devdata->lower_threshold_net_xmits =
36927c18
TS
1840 max_t(unsigned long,
1841 1, (devdata->max_outstanding_net_xmits / 2));
68905a14
DK
1842
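	/*
	 * Worked example of the thresholds above (illustrative, assuming
	 * num_rcv_bufs = 64): max_outstanding_net_xmits = max(3, 64/3 - 2)
	 * = 19, upper_threshold_net_xmits = max(2, 19 - 1) = 18, and
	 * lower_threshold_net_xmits = max(1, 19/2) = 9.  The lower
	 * threshold is the wake point used by service_resp_queue() above;
	 * the upper threshold is intended as the corresponding stop point
	 * in the transmit path.
	 */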
1843 skb_queue_head_init(&devdata->xmitbufhead);
1844
1845 /* create a cmdrsp we can use to post and unpost rcv buffers */
1846 devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1847 if (!devdata->cmdrsp_rcv) {
1848 err = -ENOMEM;
1849 goto cleanup_cmdrsp_rcv;
1850 }
1851 devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1852 if (!devdata->xmit_cmdrsp) {
1853 err = -ENOMEM;
1854 goto cleanup_xmit_cmdrsp;
1855 }
68905a14
DK
1856 INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
1857 devdata->server_down = false;
1858 devdata->server_change_state = false;
1859
1860 /* set the default MTU */
1861 channel_offset = offsetof(struct spar_io_channel_protocol,
1862 vnic.mtu);
1863 err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
00748b0c
TS
1864 if (err) {
1865 dev_err(&dev->device,
1866 "%s failed to get mtu from chan (%d)\n",
1867 __func__, err);
68905a14 1868 goto cleanup_xmit_cmdrsp;
00748b0c 1869 }
68905a14
DK
1870
1871 /* TODO: Setup Interrupt information */
1872 /* Let's start our threads to get responses */
946b2546
NH
1873 netif_napi_add(netdev, &devdata->napi, visornic_poll, 64);
1874
1875 setup_timer(&devdata->irq_poll_timer, poll_for_irq,
1876 (unsigned long)devdata);
1877 /*
1878 * Note: This timer has to start running before the device can
1879 * be used, because the napi poll routine is responsible for
1880 * setting enab_dis_acked.
1881 */
1882 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
1883
68905a14
DK
1884 channel_offset = offsetof(struct spar_io_channel_protocol,
1885 channel_header.features);
1886 err = visorbus_read_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1887 if (err) {
1888 dev_err(&dev->device,
1889 "%s failed to get features from chan (%d)\n",
1890 __func__, err);
946b2546 1891 goto cleanup_napi_add;
00748b0c 1892 }
68905a14
DK
1893
1894 features |= ULTRA_IO_CHANNEL_IS_POLLING;
1895 err = visorbus_write_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1896 if (err) {
1897 dev_err(&dev->device,
1898 "%s failed to set features in chan (%d)\n",
1899 __func__, err);
946b2546 1900 goto cleanup_napi_add;
00748b0c 1901 }
68905a14 1902
68905a14 1903 err = register_netdev(netdev);
00748b0c
TS
1904 if (err) {
1905 dev_err(&dev->device,
1906 "%s register_netdev failed (%d)\n", __func__, err);
946b2546 1907 goto cleanup_napi_add;
00748b0c 1908 }
68905a14
DK
1909
1910 /* create debugfs directories */
1911 devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
1912 visornic_debugfs_dir);
1913 if (!devdata->eth_debugfs_dir) {
00748b0c
TS
1914 dev_err(&dev->device,
1915 "%s debugfs_create_dir %s failed\n",
1916 __func__, netdev->name);
68905a14 1917 err = -ENOMEM;
1a2e3e3d 1918 goto cleanup_xmit_cmdrsp;
68905a14
DK
1919 }
1920
00748b0c
TS
1921 dev_info(&dev->device, "%s success netdev=%s\n",
1922 __func__, netdev->name);
68905a14
DK
1923 return 0;
1924
946b2546
NH
1925cleanup_napi_add:
1926 del_timer_sync(&devdata->irq_poll_timer);
1927 netif_napi_del(&devdata->napi);
1928
68905a14
DK
1929cleanup_xmit_cmdrsp:
1930 kfree(devdata->xmit_cmdrsp);
1931
1932cleanup_cmdrsp_rcv:
1933 kfree(devdata->cmdrsp_rcv);
1934
1935cleanup_rcvbuf:
1936 kfree(devdata->rcvbuf);
1937
1938cleanup_netdev:
1939 free_netdev(netdev);
1940 return err;
1941}
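/*
 * Illustrative sketch (not part of the driver): the read-modify-write that
 * visornic_probe() performs on the channel feature bits to advertise
 * polling mode.  enable_polling_feature() is a hypothetical helper wrapping
 * the same visorbus_read_channel()/visorbus_write_channel() calls.
 */
#if 0
static int enable_polling_feature(struct visor_device *dev)
{
	int channel_offset = offsetof(struct spar_io_channel_protocol,
				      channel_header.features);
	u64 features;
	int err;

	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		return err;

	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	return visorbus_write_channel(dev, channel_offset, &features, 8);
}
#endif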
1942
1943/**
1944 * host_side_disappeared - IO part is gone.
1945 * @devdata: device object
1946 *
1947 * IO partition servicing this device is gone, do cleanup
1948 * Returns void.
1949 */
1950static void host_side_disappeared(struct visornic_devdata *devdata)
1951{
1952 unsigned long flags;
1953
1954 spin_lock_irqsave(&devdata->priv_lock, flags);
1955 sprintf(devdata->name, "<dev#%d-history>", devdata->devnum);
1956 devdata->dev = NULL; /* indicate device destroyed */
1957 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1958}
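/*
 * Illustrative sketch (not part of the driver): why host_side_disappeared()
 * clears devdata->dev under priv_lock.  A path that still holds a devdata
 * reference can check the pointer under the same lock before touching the
 * channel; is_io_partition_present() is a hypothetical helper.
 */
#if 0
static bool is_io_partition_present(struct visornic_devdata *devdata)
{
	unsigned long flags;
	bool present;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	present = (devdata->dev != NULL);
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	return present;
}
#endif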
1959
1960/**
1961 * visornic_remove - Called when visornic dev goes away
1962 * @dev: visornic device that is being removed
1963 *
1964 * Called when DEVICE_DESTROY gets called to remove device.
1965 * Returns void
1966 */
1967static void visornic_remove(struct visor_device *dev)
1968{
1969 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
46df8226
TS
1970 struct net_device *netdev;
1971 unsigned long flags;
68905a14 1972
00748b0c
TS
1973 if (!devdata) {
1974 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 1975 return;
00748b0c 1976 }
46df8226
TS
1977 spin_lock_irqsave(&devdata->priv_lock, flags);
1978 if (devdata->going_away) {
1979 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1980 dev_err(&dev->device, "%s already being removed\n", __func__);
1981 return;
1982 }
1983 devdata->going_away = true;
1984 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1985 netdev = devdata->netdev;
1986 if (!netdev) {
1987 dev_err(&dev->device, "%s not net device\n", __func__);
1988 return;
1989 }
1990
1991 /* going_away prevents new items being added to the workqueues */
46df8226
TS
1992 flush_workqueue(visornic_timeout_reset_workqueue);
1993
1994 debugfs_remove_recursive(devdata->eth_debugfs_dir);
1995
1996 unregister_netdev(netdev); /* this will call visornic_close() */
1997
946b2546
NH
1998 del_timer_sync(&devdata->irq_poll_timer);
1999 netif_napi_del(&devdata->napi);
46df8226 2000
68905a14
DK
2001 dev_set_drvdata(&dev->device, NULL);
2002 host_side_disappeared(devdata);
8d0119d8 2003 devdata_release(devdata);
46df8226 2004 free_netdev(netdev);
68905a14
DK
2005}
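/*
 * Illustrative sketch (not part of the driver): honouring the going_away
 * flag that visornic_remove() sets before tearing the device down.  The
 * comment in visornic_remove() notes that going_away prevents new items
 * being added to the workqueues; queue_timeout_reset_example() is a
 * hypothetical caller showing that check.
 */
#if 0
static void queue_timeout_reset_example(struct visornic_devdata *devdata)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->going_away) {
		/* device is being torn down; don't queue new work */
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
}
#endif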
2006
2007/**
2008 * visornic_pause - Called when IO Part disappears
2009 * @dev: visornic device that is being serviced
2010 * @complete_func: call when finished.
2011 *
2012 * Called when the IO Partition has gone down. Need to free
2013 * up resources and wait for IO partition to come back. Mark
2014 * link as down and don't attempt any DMA. When we have freed
2015 * memory call the complete_func so that Command knows we are
2016 * done. If we don't call complete_func, IO part will never
2017 * come back.
2018 * Returns 0 for success.
2019 */
2020static int visornic_pause(struct visor_device *dev,
2021 visorbus_state_complete_func complete_func)
2022{
2023 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
2024
d01da5ea 2025 visornic_serverdown(devdata, complete_func);
68905a14
DK
2026 return 0;
2027}
2028
2029/**
2030 * visornic_resume - Called when IO part has recovered
2031 * @dev: visornic device that is being serviced
2032 * @complete_func: function to call when finished
2033 *
2034 * Called when the IO partition has recovered. Reestablish
2035 * connection to the IO part and set the link up. Okay to do
2036 * DMA again.
2037 * Returns 0 for success.
2038 */
2039static int visornic_resume(struct visor_device *dev,
2040 visorbus_state_complete_func complete_func)
2041{
2042 struct visornic_devdata *devdata;
2043 struct net_device *netdev;
2044 unsigned long flags;
2045
2046 devdata = dev_get_drvdata(&dev->device);
00748b0c
TS
2047 if (!devdata) {
2048 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 2049 return -EINVAL;
00748b0c 2050 }
68905a14
DK
2051
2052 netdev = devdata->netdev;
2053
c847020e
TS
2054 spin_lock_irqsave(&devdata->priv_lock, flags);
2055 if (devdata->server_change_state) {
68905a14 2056 spin_unlock_irqrestore(&devdata->priv_lock, flags);
c847020e 2057 dev_err(&dev->device, "%s server already changing state\n",
00748b0c 2058 __func__);
c847020e 2059 return -EINVAL;
68905a14 2060 }
c847020e
TS
2061 if (!devdata->server_down) {
2062 spin_unlock_irqrestore(&devdata->priv_lock, flags);
2063 dev_err(&dev->device, "%s server not down\n", __func__);
2064 complete_func(dev, 0);
2065 return 0;
2066 }
2067 devdata->server_change_state = true;
2068 spin_unlock_irqrestore(&devdata->priv_lock, flags);
946b2546 2069
c847020e
TS
2070 /* Must transition channel to ATTACHED state BEFORE
2071 * we can start using the device again.
2072 * TODO: State transitions
2073 */
946b2546
NH
2074 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
2075
2076 init_rcv_bufs(netdev, devdata);
c847020e
TS
2077
2078 rtnl_lock();
2079 dev_open(netdev);
2080 rtnl_unlock();
68905a14
DK
2081
2082 complete_func(dev, 0);
2083 return 0;
2084}
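/*
 * Illustrative sketch (not part of the driver): the state guard at the top
 * of visornic_resume(), pulled into a hypothetical helper to make the three
 * outcomes explicit (transition already running, server not actually down,
 * or ready to resume).
 */
#if 0
static int resume_state_check(struct visornic_devdata *devdata)
{
	unsigned long flags;
	int ret = 0;	/* 0 means: proceed with the resume */

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->server_change_state)
		ret = -EINVAL;	/* another transition is in progress */
	else if (!devdata->server_down)
		ret = 1;	/* nothing to do; just call complete_func */
	else
		devdata->server_change_state = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	return ret;
}
#endif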
2085
2086/**
2087 * visornic_init - Init function
2088 *
2089 * Init function for the visornic driver. Do initial driver setup
2090 * and wait for devices.
2091 * Returns 0 for success, negative for error.
2092 */
2093static int visornic_init(void)
2094{
2095 struct dentry *ret;
2096 int err = -ENOMEM;
2097
68905a14
DK
2098 visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
2099 if (!visornic_debugfs_dir)
2100 return err;
2101
2102 ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
2103 &debugfs_info_fops);
2104 if (!ret)
2105 goto cleanup_debugfs;
2106 ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
2107 NULL, &debugfs_enable_ints_fops);
2108 if (!ret)
2109 goto cleanup_debugfs;
2110
68905a14
DK
2111 /* create workqueue for tx timeout reset */
2112 visornic_timeout_reset_workqueue =
2113 create_singlethread_workqueue("visornic_timeout_reset");
2114 if (!visornic_timeout_reset_workqueue)
2115 goto cleanup_workqueue;
2116
2117 spin_lock_init(&dev_num_pool_lock);
2118 dev_num_pool = kzalloc(BITS_TO_LONGS(MAXDEVICES) * sizeof(long), GFP_KERNEL);
2119 if (!dev_num_pool)
2120 goto cleanup_workqueue;
2121
2122 visorbus_register_visor_driver(&visornic_driver);
2123 return 0;
2124
2125cleanup_workqueue:
68905a14
DK
2126 if (visornic_timeout_reset_workqueue) {
2127 flush_workqueue(visornic_timeout_reset_workqueue);
2128 destroy_workqueue(visornic_timeout_reset_workqueue);
2129 }
2130cleanup_debugfs:
2131 debugfs_remove_recursive(visornic_debugfs_dir);
2132
2133 return err;
2134}
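/*
 * Illustrative sketch (not part of the driver): claiming a device number
 * from the dev_num_pool bitmap set up in visornic_init().
 * claim_dev_num_example() is a hypothetical helper; the pool's real
 * consumers are elsewhere in this file and not shown here.
 */
#if 0
static int claim_dev_num_example(void)
{
	int devnum;

	spin_lock(&dev_num_pool_lock);
	devnum = find_first_zero_bit(dev_num_pool, MAXDEVICES);
	if (devnum < MAXDEVICES)
		set_bit(devnum, dev_num_pool);
	spin_unlock(&dev_num_pool_lock);

	return devnum < MAXDEVICES ? devnum : -ENOSPC;
}
#endif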
2135
2136/**
2137 * visornic_cleanup - driver exit routine
2138 *
2139 * Unregister driver from the bus and free up memory.
2140 */
2141static void visornic_cleanup(void)
2142{
3798ff31
TS
2143 visorbus_unregister_visor_driver(&visornic_driver);
2144
68905a14
DK
2145 if (visornic_timeout_reset_workqueue) {
2146 flush_workqueue(visornic_timeout_reset_workqueue);
2147 destroy_workqueue(visornic_timeout_reset_workqueue);
2148 }
2149 debugfs_remove_recursive(visornic_debugfs_dir);
2150
68905a14
DK
2151 kfree(dev_num_pool);
2152 dev_num_pool = NULL;
2153}
2154
2155module_init(visornic_init);
2156module_exit(visornic_cleanup);
2157
2158MODULE_AUTHOR("Unisys");
2159MODULE_LICENSE("GPL");
2160MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
2161MODULE_VERSION("1.0.0.0");