/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */

/* This driver lives in an s-Par partition and registers to ethernet IO
 * channels from the visorbus driver. It creates netdev devices, forwards
 * transmits to the IO channel, and accepts receives from the IO
 * Partition via the IO channel.
 */

#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include "visorbus.h"
#include "iochannel.h"

#define VISORNIC_INFINITE_RSP_WAIT 0

/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
 *         = 163840 bytes
 */
#define MAX_BUF 163840
#define NAPI_WEIGHT 64

/* GUIDs for the channel types supported by this driver. */
static struct visor_channeltype_descriptor visornic_channel_types[] = {
        /* Note that the only channel type we expect to be reported by the
         * bus driver is the VISOR_VNIC channel.
         */
        { VISOR_VNIC_CHANNEL_UUID, "ultravnic" },
        { NULL_UUID_LE, NULL }
};
MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);

/*
 * FIXME XXX: This next line of code must be fixed and removed before
 * acceptance into the 'normal' part of the kernel. It is only here as a place
 * holder to get module autoloading functionality working for visorbus. Code
 * must be added to scripts/mod/file2alias.c, etc., to get this working
 * properly.
 */
MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_UUID_STR);

struct chanstat {
        unsigned long got_rcv;
        unsigned long got_enbdisack;
        unsigned long got_xmit_done;
        unsigned long xmit_fail;
        unsigned long sent_enbdis;
        unsigned long sent_promisc;
        unsigned long sent_post;
        unsigned long sent_post_failed;
        unsigned long sent_xmit;
        unsigned long reject_count;
        unsigned long extra_rcvbufs_sent;
};

struct visornic_devdata {
        /* 0 disabled, 1 enabled to receive */
        unsigned short enabled;
        /* NET_RCV_ENABLE/DISABLE acked by IOPART */
        unsigned short enab_dis_acked;

        struct visor_device *dev;
        struct net_device *netdev;
        struct net_device_stats net_stats;
        atomic_t interrupt_rcvd;
        wait_queue_head_t rsp_queue;
        struct sk_buff **rcvbuf;
        /* incarnation_id lets IOPART know about re-birth */
        u64 incarnation_id;
        /* flags as they were prior to set_multicast_list */
        unsigned short old_flags;
        /* count of users */
        atomic_t usage;

        /* number of rcv buffers the vnic will post */
        int num_rcv_bufs;
        int num_rcv_bufs_could_not_alloc;
        atomic_t num_rcvbuf_in_iovm;
        unsigned long alloc_failed_in_if_needed_cnt;
        unsigned long alloc_failed_in_repost_rtn_cnt;

        /* absolute max number of outstanding xmits - should never hit this */
        unsigned long max_outstanding_net_xmits;
        /* high water mark for calling netif_stop_queue() */
        unsigned long upper_threshold_net_xmits;
        /* low water mark for calling netif_wake_queue() */
        unsigned long lower_threshold_net_xmits;
        /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
        struct sk_buff_head xmitbufhead;

        visorbus_state_complete_func server_down_complete_func;
        struct work_struct timeout_reset;
        /* cmdrsp_rcv is used for posting/unposting rcv buffers */
        struct uiscmdrsp *cmdrsp_rcv;
        /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
        struct uiscmdrsp *xmit_cmdrsp;
        /* IOPART is down */
        bool server_down;
        /* Processing SERVER_CHANGESTATE msg */
        bool server_change_state;
        /* device is being torn down */
        bool going_away;
        struct dentry *eth_debugfs_dir;
        u64 interrupts_rcvd;
        u64 interrupts_notme;
        u64 interrupts_disabled;
        u64 busy_cnt;
        /* spinlock to access devdata structures */
        spinlock_t priv_lock;

        /* flow control counters */
        u64 flow_control_upper_hits;
        u64 flow_control_lower_hits;

        /* debug counters */
        /* # rcvs of 0 buffers */
        unsigned long n_rcv0;
        /* # rcvs of 1 buffers */
        unsigned long n_rcv1;
        /* # rcvs of 2 buffers */
        unsigned long n_rcv2;
        /* # rcvs of >2 buffers */
        unsigned long n_rcvx;
        /* # repost_rcvbuf_cnt */
        unsigned long found_repost_rcvbuf_cnt;
        /* # of times we found the skb */
        unsigned long repost_found_skb_cnt;
        /* # of lost rcv buffers */
        unsigned long n_repost_deficit;
        /* # of unknown rcv skb not freed */
        unsigned long bad_rcv_buf;
        /* # bogus rcv packets */
        unsigned long n_rcv_packets_not_accepted;

        int queuefullmsg_logged;
        struct chanstat chstat;
        struct timer_list irq_poll_timer;
        struct napi_struct napi;
        struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};

/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
static u16
add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
                     u16 max_pi_arr_entries, struct phys_info pi_arr[])
{
        u32 len;
        u16 i, firstlen;

        firstlen = PI_PAGE_SIZE - inp_off;
        if (inp_len <= firstlen) {
                /* The input entry spans only one page - add it as is. */
                if (index >= max_pi_arr_entries)
                        return 0;
                pi_arr[index].pi_pfn = inp_pfn;
                pi_arr[index].pi_off = (u16)inp_off;
                pi_arr[index].pi_len = (u16)inp_len;
                return index + 1;
        }

        /* This entry spans multiple pages. */
        for (len = inp_len, i = 0; len;
             len -= pi_arr[index + i].pi_len, i++) {
                if (index + i >= max_pi_arr_entries)
                        return 0;
                pi_arr[index + i].pi_pfn = inp_pfn + i;
                if (i == 0) {
                        pi_arr[index].pi_off = inp_off;
                        pi_arr[index].pi_len = firstlen;
                } else {
                        pi_arr[index + i].pi_off = 0;
                        pi_arr[index + i].pi_len =
                                (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
                }
        }
        return index + i;
}
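
/* Worked example (illustrative only, assuming PI_PAGE_SIZE is 4096):
 * a call with inp_off = 3000 and inp_len = 6000 spans three pages, so
 * add_physinfo_entries() emits
 *   pi_arr[index + 0]: pfn = inp_pfn,     off = 3000, len = 1096 (firstlen)
 *   pi_arr[index + 1]: pfn = inp_pfn + 1, off = 0,    len = 4096
 *   pi_arr[index + 2]: pfn = inp_pfn + 2, off = 0,    len = 808
 * and returns index + 3.
 */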

/*
 * visor_copy_fragsinfo_from_skb - copy fragment info from an skb
 * @skb: skbuff that we are pulling the frags from
 * @firstfraglen: length of first fragment in skb
 * @frags_max: max len of frags array
 * @frags: frags array filled in on output
 *
 * Copy the fragment list in the SKB to a phys_info
 * array that the IOPART understands.
 * Return value indicates number of entries filled in frags;
 * negative values indicate an error.
 */
static int
visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
                              unsigned int frags_max,
                              struct phys_info frags[])
{
        unsigned int count = 0, frag, size, offset = 0, numfrags;
        unsigned int total_count;

        numfrags = skb_shinfo(skb)->nr_frags;

        /* Compute the number of fragments this skb has, and if it's more
         * than the frag array can hold, linearize the skb.
         */
        total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
        if (firstfraglen % PI_PAGE_SIZE)
                total_count++;

        if (total_count > frags_max) {
                if (skb_linearize(skb))
                        return -EINVAL;
                numfrags = skb_shinfo(skb)->nr_frags;
                firstfraglen = 0;
        }

        while (firstfraglen) {
                if (count == frags_max)
                        return -EINVAL;

                frags[count].pi_pfn =
                        page_to_pfn(virt_to_page(skb->data + offset));
                frags[count].pi_off =
                        (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
                size = min_t(unsigned int, firstfraglen,
                             PI_PAGE_SIZE - frags[count].pi_off);

                /* can take smallest of firstfraglen (what's left) OR
                 * bytes left in the page
                 */
                frags[count].pi_len = size;
                firstfraglen -= size;
                offset += size;
                count++;
        }
        if (numfrags) {
                if ((count + numfrags) > frags_max)
                        return -EINVAL;

                for (frag = 0; frag < numfrags; frag++) {
                        count = add_physinfo_entries(page_to_pfn(
                                  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
                                  skb_shinfo(skb)->frags[frag].page_offset,
                                  skb_shinfo(skb)->frags[frag].size,
                                  count, frags_max, frags);
                        /* add_physinfo_entries only returns
                         * zero if the frags array is out of room.
                         * That should never happen because we
                         * fail above, if count + numfrags > frags_max.
                         */
                        if (!count)
                                return -EINVAL;
                }
        }
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *skbinlist;
                int c;

                for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
                     skbinlist = skbinlist->next) {
                        c = visor_copy_fragsinfo_from_skb(skbinlist,
                                                          skbinlist->len -
                                                          skbinlist->data_len,
                                                          frags_max - count,
                                                          &frags[count]);
                        if (c < 0)
                                return c;
                        count += c;
                }
        }
        return count;
}
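
/* Sizing example (assuming PI_PAGE_SIZE is 4096): an skb with
 * firstfraglen = 5000 and nr_frags = 2 gives total_count = 2 + 1 + 1 = 4,
 * i.e. two page-backed frags plus two entries budgeted for the 5000-byte
 * linear area. If frags_max were 3, the skb would be linearized and
 * described from its linear data alone.
 */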

static ssize_t enable_ints_write(struct file *file,
                                 const char __user *buffer,
                                 size_t count, loff_t *ppos)
{
        /* Don't want to break ABI here by having a debugfs
         * file that no longer exists or is writable, so
         * let's just make this a vestigial function
         */
        return count;
}

static const struct file_operations debugfs_enable_ints_fops = {
        .write = enable_ints_write,
};

/*
 * visornic_serverdown_complete - IOPART went down, pause device
 * @devdata: device managed by the IOPART
 *
 * The IO partition has gone down and we need to do some cleanup
 * for when it comes back. Treat the IO partition as the link
 * being down.
 * Returns void.
 */
static void
visornic_serverdown_complete(struct visornic_devdata *devdata)
{
        struct net_device *netdev;

        netdev = devdata->netdev;

        /* Stop polling for interrupts */
        del_timer_sync(&devdata->irq_poll_timer);

        rtnl_lock();
        dev_close(netdev);
        rtnl_unlock();

        atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
        devdata->chstat.sent_xmit = 0;
        devdata->chstat.got_xmit_done = 0;

        if (devdata->server_down_complete_func)
                (*devdata->server_down_complete_func)(devdata->dev, 0);

        devdata->server_down = true;
        devdata->server_change_state = false;
        devdata->server_down_complete_func = NULL;
}

/*
 * visornic_serverdown - Command has notified us that IOPART is down
 * @devdata: device that is being managed by IOPART
 * @complete_func: function to call when the state change completes
 *
 * Schedule the work needed to handle the server down request. Make
 * sure we haven't already handled the server change state event.
 * Returns 0 if we scheduled the work, negative error code on failure.
 */
static int
visornic_serverdown(struct visornic_devdata *devdata,
                    visorbus_state_complete_func complete_func)
{
        unsigned long flags;
        int err;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        if (devdata->server_change_state) {
                dev_dbg(&devdata->dev->device, "%s changing state\n",
                        __func__);
                err = -EINVAL;
                goto err_unlock;
        }
        if (devdata->server_down) {
                dev_dbg(&devdata->dev->device, "%s already down\n",
                        __func__);
                err = -EINVAL;
                goto err_unlock;
        }
        if (devdata->going_away) {
                dev_dbg(&devdata->dev->device,
                        "%s aborting because device removal pending\n",
                        __func__);
                err = -ENODEV;
                goto err_unlock;
        }
        devdata->server_change_state = true;
        devdata->server_down_complete_func = complete_func;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        visornic_serverdown_complete(devdata);
        return 0;

err_unlock:
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
        return err;
}

/*
 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
 * @netdev: network adapter the rcv bufs are attached to.
 *
 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
 * so that it can write rcv data into our memory space.
 * Return pointer to sk_buff.
 */
static struct sk_buff *
alloc_rcv_buf(struct net_device *netdev)
{
        struct sk_buff *skb;

        /* NOTE: the first fragment in each rcv buffer is pointed to by
         * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
         * in length, so the first frag is large enough to hold 1514.
         */
        skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
        if (!skb)
                return NULL;
        skb->dev = netdev;
        /* current value of mtu doesn't come into play here; large
         * packets will just end up using multiple rcv buffers all of
         * same size.
         */
        skb->len = RCVPOST_BUF_SIZE;
        /* alloc_skb already zeroes it out for clarification. */
        skb->data_len = 0;
        return skb;
}

/*
 * post_skb - post a skb to the IO Partition.
 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
 * @devdata: visornic_devdata to post the skb to
 * @skb: skb to give to the IO partition
 *
 * Send the skb to the IO Partition.
 * Returns 0 on success, negative error code otherwise.
 */
static int
post_skb(struct uiscmdrsp *cmdrsp,
         struct visornic_devdata *devdata, struct sk_buff *skb)
{
        int err;

        cmdrsp->net.buf = skb;
        cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
        cmdrsp->net.rcvpost.frag.pi_off =
                (unsigned long)skb->data & PI_PAGE_MASK;
        cmdrsp->net.rcvpost.frag.pi_len = skb->len;
        cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;

        if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
                return -EINVAL;

        cmdrsp->net.type = NET_RCV_POST;
        cmdrsp->cmdtype = CMD_NET_TYPE;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        cmdrsp);
        if (err) {
                devdata->chstat.sent_post_failed++;
                return err;
        }

        atomic_inc(&devdata->num_rcvbuf_in_iovm);
        devdata->chstat.sent_post++;

        return 0;
}
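
/* Illustrative bounds check (assuming PI_PAGE_SIZE is 4096): a rcv buffer
 * whose data begins at page offset 1024 may be at most 3072 bytes long.
 * Posting 3584 bytes from that offset would cross the page boundary
 * (1024 + 3584 > 4096), so post_skb() would return -EINVAL.
 */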

/*
 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
 * @netdev: netdevice we are enabling/disabling, used as context
 *          when the response comes back
 * @state: enable = 1/disable = 0
 * @devdata: visornic device we are enabling/disabling
 *
 * Send the enable/disable message to the IO Partition.
 * Returns 0 on success, negative error code otherwise.
 */
static int
send_enbdis(struct net_device *netdev, int state,
            struct visornic_devdata *devdata)
{
        int err;

        devdata->cmdrsp_rcv->net.enbdis.enable = state;
        devdata->cmdrsp_rcv->net.enbdis.context = netdev;
        devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
        devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        devdata->cmdrsp_rcv);
        if (err)
                return err;
        devdata->chstat.sent_enbdis++;
        return 0;
}
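
/* Handshake sketch (illustrative summary of the code below): the enable
 * path calls send_enbdis(netdev, 1, devdata) and then polls
 * enab_dis_acked roughly every 10 ms; the IO Partition eventually
 * answers with NET_RCV_ENBDIS_ACK, which service_resp_queue() handles
 * by setting enab_dis_acked = 1. The disable path is symmetric with
 * state = 0.
 */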

/*
 * visornic_disable_with_timeout - Disable network adapter
 * @netdev: netdevice to disable
 * @timeout: timeout to wait for disable
 *
 * Disable the network adapter and inform the IO Partition that we
 * are disabled; reclaim memory from rcv bufs.
 * Returns 0 on success, negative if the IO Partition failed to
 * respond.
 */
static int
visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);
        int i;
        unsigned long flags;
        int wait = 0;
        int err;

        /* send a msg telling the other end we are stopping incoming pkts */
        spin_lock_irqsave(&devdata->priv_lock, flags);
        devdata->enabled = 0;
        /* must wait for ack */
        devdata->enab_dis_acked = 0;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* send disable and wait for ack -- don't hold lock when sending
         * disable because if the queue is full, insert might sleep.
         * If an error occurs, don't wait for the timeout.
         */
        err = send_enbdis(netdev, 0, devdata);
        if (err)
                return err;

        /* wait for ack to arrive before we try to free rcv buffers
         * NOTE: the other end automatically unposts the rcv buffers
         * when it gets a disable.
         */
        spin_lock_irqsave(&devdata->priv_lock, flags);
        while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
               (wait < timeout)) {
                if (devdata->enab_dis_acked)
                        break;
                if (devdata->server_down || devdata->server_change_state) {
                        dev_dbg(&netdev->dev, "%s server went away\n",
                                __func__);
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                wait += schedule_timeout(msecs_to_jiffies(10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }

        /* Wait for usage to go to 1 (no other users) before freeing
         * rcv buffers
         */
        if (atomic_read(&devdata->usage) > 1) {
                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irqrestore(&devdata->priv_lock, flags);
                        schedule_timeout(msecs_to_jiffies(10));
                        spin_lock_irqsave(&devdata->priv_lock, flags);
                        if (atomic_read(&devdata->usage))
                                break;
                }
        }
        /* we've set enabled to 0, so we can give up the lock. */
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* stop the transmit queue so nothing more can be transmitted */
        netif_stop_queue(netdev);

        napi_disable(&devdata->napi);

        skb_queue_purge(&devdata->xmitbufhead);

        /* Free rcv buffers - other end has automatically unposted them on
         * disable
         */
        for (i = 0; i < devdata->num_rcv_bufs; i++) {
                if (devdata->rcvbuf[i]) {
                        kfree_skb(devdata->rcvbuf[i]);
                        devdata->rcvbuf[i] = NULL;
                }
        }

        return 0;
}

/*
 * init_rcv_bufs - initialize receive bufs and send them to the IO Part
 * @netdev: struct netdevice
 * @devdata: visornic_devdata
 *
 * Allocate rcv buffers and post them to the IO Partition.
 * Return 0 for success, and negative for failure.
 */
static int
init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
{
        int i, j, count, err;

        /* allocate fixed number of receive buffers to post to uisnic
         * post receive buffers after we've allocated a required amount
         */
        for (i = 0; i < devdata->num_rcv_bufs; i++) {
                devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
                /* if we failed to allocate one let us stop */
                if (!devdata->rcvbuf[i])
                        break;
        }
        /* couldn't even allocate one -- bail out */
        if (i == 0)
                return -ENOMEM;
        count = i;

        /* Ensure we can alloc 2/3rd of the requested number of buffers.
         * 2/3 is an arbitrary choice; used also in ndis init.c
         */
        if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
                /* free receive buffers we did alloc and then bail out */
                for (i = 0; i < count; i++) {
                        kfree_skb(devdata->rcvbuf[i]);
                        devdata->rcvbuf[i] = NULL;
                }
                return -ENOMEM;
        }

        /* post receive buffers to receive incoming input - without holding
         * lock - we've not enabled nor started the queue so there shouldn't
         * be any rcv or xmit activity
         */
        for (i = 0; i < count; i++) {
                err = post_skb(devdata->cmdrsp_rcv, devdata,
                               devdata->rcvbuf[i]);
                if (!err)
                        continue;

                /* Error handling -
                 * If we posted at least one skb, we should return success,
                 * but need to free the resources that we have not
                 * successfully posted.
                 */
                for (j = i; j < count; j++) {
                        kfree_skb(devdata->rcvbuf[j]);
                        devdata->rcvbuf[j] = NULL;
                }
                if (i == 0)
                        return err;
                break;
        }

        return 0;
}

/*
 * visornic_enable_with_timeout - send enable to IO Part
 * @netdev: struct net_device
 * @timeout: Time to wait for the ACK from the enable
 *
 * Sends enable to IOVM, inits, and posts receive buffers to IOVM.
 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
 * Return 0 for success, negative for failure.
 */
static int
visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
{
        int err = 0;
        struct visornic_devdata *devdata = netdev_priv(netdev);
        unsigned long flags;
        int wait = 0;

        napi_enable(&devdata->napi);

        /* NOTE: the other end automatically unposts the rcv buffers when it
         * gets a disable.
         */
        err = init_rcv_bufs(netdev, devdata);
        if (err < 0) {
                dev_err(&netdev->dev,
                        "%s failed to init rcv bufs\n", __func__);
                return err;
        }

        spin_lock_irqsave(&devdata->priv_lock, flags);
        devdata->enabled = 1;
        devdata->enab_dis_acked = 0;

        /* now we're ready, let's send an ENB to uisnic but until we get
         * an ACK back from uisnic, we'll drop the packets
         */
        devdata->n_rcv_packets_not_accepted = 0;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* send enable and wait for ack -- don't hold lock when sending
         * enable because if the queue is full, insert might sleep. If an
         * error occurs, error out.
         */
        err = send_enbdis(netdev, 1, devdata);
        if (err)
                return err;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
               (wait < timeout)) {
                if (devdata->enab_dis_acked)
                        break;
                if (devdata->server_down || devdata->server_change_state) {
                        dev_dbg(&netdev->dev, "%s server went away\n",
                                __func__);
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                wait += schedule_timeout(msecs_to_jiffies(10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }

        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        if (!devdata->enab_dis_acked) {
                dev_err(&netdev->dev, "%s missing ACK\n", __func__);
                return -EIO;
        }

        netif_start_queue(netdev);

        return 0;
}

/*
 * visornic_timeout_reset - handle xmit timeout resets
 * @work: work item that scheduled this work
 *
 * Transmit timeouts are typically handled by resetting the device.
 * For our virtual NIC we will send a Disable and Enable to the IOVM.
 * If it doesn't respond we will trigger a serverdown.
 */
static void
visornic_timeout_reset(struct work_struct *work)
{
        struct visornic_devdata *devdata;
        struct net_device *netdev;
        int response = 0;

        devdata = container_of(work, struct visornic_devdata, timeout_reset);
        netdev = devdata->netdev;

        rtnl_lock();
        if (!netif_running(netdev)) {
                rtnl_unlock();
                return;
        }

        response = visornic_disable_with_timeout(netdev,
                                                 VISORNIC_INFINITE_RSP_WAIT);
        if (response)
                goto call_serverdown;

        response = visornic_enable_with_timeout(netdev,
                                                VISORNIC_INFINITE_RSP_WAIT);
        if (response)
                goto call_serverdown;

        rtnl_unlock();

        return;

call_serverdown:
        visornic_serverdown(devdata, NULL);
        rtnl_unlock();
}

/*
 * visornic_open - Enable the visornic device and mark the queue started
 * @netdev: netdevice to start
 *
 * Enable the device and start the transmit queue.
 * Return 0 for success.
 */
static int
visornic_open(struct net_device *netdev)
{
        visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);

        return 0;
}

/*
 * visornic_close - Disables the visornic device and stops the queues
 * @netdev: netdevice to stop
 *
 * Disable the device and stop the transmit queue.
 * Return 0 for success.
 */
static int
visornic_close(struct net_device *netdev)
{
        visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);

        return 0;
}

/*
 * devdata_xmits_outstanding - compute outstanding xmits
 * @devdata: visornic_devdata for device
 *
 * Return value is the number of outstanding xmits.
 */
static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
{
        if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
                return devdata->chstat.sent_xmit -
                        devdata->chstat.got_xmit_done;
        return (ULONG_MAX - devdata->chstat.got_xmit_done
                + devdata->chstat.sent_xmit + 1);
}
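
/* Wraparound example (hypothetical counter values): if sent_xmit has
 * wrapped around to 5 while got_xmit_done is still ULONG_MAX - 2, the
 * second branch yields (ULONG_MAX - (ULONG_MAX - 2)) + 5 + 1 = 8, the
 * same answer that unsigned modular subtraction
 * sent_xmit - got_xmit_done would give.
 */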

/*
 * vnic_hit_high_watermark
 * @devdata: indicates visornic device we are checking
 * @high_watermark: max num of unacked xmits we will tolerate
 *                  before we will start throttling
 *
 * Returns true iff the number of unacked xmits sent to
 * the IO partition is >= high_watermark.
 */
static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
                                    ulong high_watermark)
{
        return (devdata_xmits_outstanding(devdata) >= high_watermark);
}

/*
 * vnic_hit_low_watermark
 * @devdata: indicates visornic device we are checking
 * @low_watermark: we will wait until the num of unacked xmits
 *                 drops to this value or lower before we start
 *                 transmitting again
 *
 * Returns true iff the number of unacked xmits sent to
 * the IO partition is <= low_watermark.
 */
static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
                                   ulong low_watermark)
{
        return (devdata_xmits_outstanding(devdata) <= low_watermark);
}
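
/* Flow-control sketch (threshold values here are illustrative, not the
 * driver's actual tuning): with upper_threshold_net_xmits = 48 and
 * lower_threshold_net_xmits = 16, visornic_xmit() stops the queue once
 * 48 xmits are outstanding, and service_resp_queue() wakes it again
 * when NET_XMIT_DONE responses bring the outstanding count back down
 * to 16.
 */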

/*
 * visornic_xmit - send a packet to the IO Partition
 * @skb: Packet to be sent
 * @netdev: net device the packet is being sent from
 *
 * Convert the skb to a cmdrsp so the IO Partition can understand it.
 * Send the XMIT command to the IO Partition for processing. This
 * function is protected from concurrent calls by a spinlock xmit_lock
 * in the net_device struct, but as soon as the function returns it
 * can be called again.
 * Returns NETDEV_TX_OK.
 */
static int
visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct visornic_devdata *devdata;
        int len, firstfraglen, padlen;
        struct uiscmdrsp *cmdrsp = NULL;
        unsigned long flags;
        int err;

        devdata = netdev_priv(netdev);
        spin_lock_irqsave(&devdata->priv_lock, flags);

        if (netif_queue_stopped(netdev) || devdata->server_down ||
            devdata->server_change_state) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - queue stopped\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* sk_buff struct is used to host network data throughout all the
         * linux network subsystems
         */
        len = skb->len;

        /* skb->len is the FULL length of data (including fragmentary portion)
         * skb->data_len is the length of the fragment portion in frags
         * skb->len - skb->data_len is size of the 1st fragment in skb->data
         * calculate the length of the first fragment that skb->data is
         * pointing to
         */
        firstfraglen = skb->len - skb->data_len;
        if (firstfraglen < ETH_HLEN) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_err(&netdev->dev,
                        "%s busy - first frag too small (%d)\n",
                        __func__, firstfraglen);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if ((len < ETH_MIN_PACKET_SIZE) &&
            ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
                /* pad the packet out to minimum size */
                padlen = ETH_MIN_PACKET_SIZE - len;
                memset(&skb->data[len], 0, padlen);
                skb->tail += padlen;
                skb->len += padlen;
                len += padlen;
                firstfraglen += padlen;
        }

        cmdrsp = devdata->xmit_cmdrsp;
        /* clear cmdrsp */
        memset(cmdrsp, 0, SIZEOF_CMDRSP);
        cmdrsp->net.type = NET_XMIT;
        cmdrsp->cmdtype = CMD_NET_TYPE;

        /* save the pointer to skb -- we'll need it for completion */
        cmdrsp->net.buf = skb;

        if (vnic_hit_high_watermark(devdata,
                                    devdata->max_outstanding_net_xmits)) {
                /* extra NET_XMITs queued over to IOVM - need to wait */
                devdata->chstat.reject_count++;
                if (!devdata->queuefullmsg_logged &&
                    ((devdata->chstat.reject_count & 0x3ff) == 1))
                        devdata->queuefullmsg_logged = 1;
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - waiting for iovm to catch up\n",
                        __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        if (devdata->queuefullmsg_logged)
                devdata->queuefullmsg_logged = 0;

        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                cmdrsp->net.xmt.lincsum.valid = 1;
                cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
                if (skb_transport_header(skb) > skb->data) {
                        cmdrsp->net.xmt.lincsum.hrawoff =
                                skb_transport_header(skb) - skb->data;
                        cmdrsp->net.xmt.lincsum.hrawoffv = 1;
                }
                if (skb_network_header(skb) > skb->data) {
                        cmdrsp->net.xmt.lincsum.nhrawoff =
                                skb_network_header(skb) - skb->data;
                        cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
                }
                cmdrsp->net.xmt.lincsum.csum = skb->csum;
        } else {
                cmdrsp->net.xmt.lincsum.valid = 0;
        }

        /* save off the length of the entire data packet */
        cmdrsp->net.xmt.len = len;

        /* copy ethernet header from first frag into cmdrsp
         * - everything else will be passed in frags & DMA'ed
         */
        memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
        /* copy frags info - from skb->data we need to only provide access
         * beyond eth header
         */
        cmdrsp->net.xmt.num_frags =
                visor_copy_fragsinfo_from_skb(skb, firstfraglen,
                                              MAX_PHYS_INFO,
                                              cmdrsp->net.xmt.frags);
        if (cmdrsp->net.xmt.num_frags < 0) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_err(&netdev->dev,
                        "%s busy - copy frags failed\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART, cmdrsp);
        if (err) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - signalinsert failed\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Track the skbs that have been sent to the IOVM for XMIT */
        skb_queue_head(&devdata->xmitbufhead, skb);

        /* update xmt stats */
        devdata->net_stats.tx_packets++;
        devdata->net_stats.tx_bytes += skb->len;
        devdata->chstat.sent_xmit++;

        /* check if we have hit the high watermark for netif_stop_queue() */
        if (vnic_hit_high_watermark(devdata,
                                    devdata->upper_threshold_net_xmits)) {
                /* extra NET_XMITs queued over to IOVM - need to wait */
                /* stop queue - call netif_wake_queue() after lower threshold */
                netif_stop_queue(netdev);
                dev_dbg(&netdev->dev,
                        "%s busy - invoking iovm flow control\n",
                        __func__);
                devdata->flow_control_upper_hits++;
        }
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* skb will be freed when we get back NET_XMIT_DONE */
        return NETDEV_TX_OK;
}
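
/* Padding example (assuming ETH_MIN_PACKET_SIZE is the usual 60-byte
 * minimum frame size, excluding FCS): a 42-byte ARP request with
 * tailroom available is zero-padded by padlen = 60 - 42 = 18 bytes, so
 * len, skb->len and firstfraglen all grow to 60 before the frame is
 * described to the IO Partition.
 */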

/*
 * visornic_get_stats - returns net_stats of the visornic device
 * @netdev: netdevice
 *
 * Returns the net_device_stats for the device
 */
static struct net_device_stats *
visornic_get_stats(struct net_device *netdev)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);

        return &devdata->net_stats;
}

/*
 * visornic_change_mtu - changes mtu of device.
 * @netdev: netdevice
 * @new_mtu: value of new mtu
 *
 * The MTU cannot be changed by the system; it must be changed via a
 * CONTROLVM message. All vnics and pnics in a switch have
 * to have the same MTU for everything to work.
 * Currently not supported.
 * Returns -EINVAL
 */
static int
visornic_change_mtu(struct net_device *netdev, int new_mtu)
{
        return -EINVAL;
}

/*
 * visornic_set_multi - set visornic device flags
 * @netdev: netdevice
 *
 * The only flag we currently support is IFF_PROMISC.
 * Returns void
 */
static void
visornic_set_multi(struct net_device *netdev)
{
        struct uiscmdrsp *cmdrsp;
        struct visornic_devdata *devdata = netdev_priv(netdev);
        int err = 0;

        if (devdata->old_flags == netdev->flags)
                return;

        if ((netdev->flags & IFF_PROMISC) ==
            (devdata->old_flags & IFF_PROMISC))
                goto out_save_flags;

        cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
        if (!cmdrsp)
                return;
        cmdrsp->cmdtype = CMD_NET_TYPE;
        cmdrsp->net.type = NET_RCV_PROMISC;
        cmdrsp->net.enbdis.context = netdev;
        cmdrsp->net.enbdis.enable =
                netdev->flags & IFF_PROMISC;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        cmdrsp);
        kfree(cmdrsp);
        if (err)
                return;

out_save_flags:
        devdata->old_flags = netdev->flags;
}

/*
 * visornic_xmit_timeout - request to timeout the xmit
 * @netdev: netdevice whose transmit timed out
 *
 * Queue the work and return. Make sure we have not already
 * been informed that the IO Partition is gone; if it is gone
 * we will already be timing out the xmits.
 */
static void
visornic_xmit_timeout(struct net_device *netdev)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        if (devdata->going_away) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                dev_dbg(&devdata->dev->device,
                        "%s aborting because device removal pending\n",
                        __func__);
                return;
        }

        /* Ensure that a ServerDown message hasn't been received */
        if (!devdata->enabled ||
            (devdata->server_down && !devdata->server_change_state)) {
                dev_dbg(&netdev->dev, "%s no processing\n",
                        __func__);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                return;
        }
        schedule_work(&devdata->timeout_reset);
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/*
 * repost_return - repost rcv bufs that have come back
 * @cmdrsp: io channel command struct to post
 * @devdata: visornic devdata for the device
 * @skb: skb
 * @netdev: netdevice
 *
 * Repost rcv buffers that have been returned to us when
 * we are finished with them.
 * Returns 0 for success, negative for error.
 */
static int
repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
              struct sk_buff *skb, struct net_device *netdev)
{
        struct net_pkt_rcv copy;
        int i = 0, cc, numreposted;
        int found_skb = 0;
        int status = 0;

        copy = cmdrsp->net.rcv;
        switch (copy.numrcvbufs) {
        case 0:
                devdata->n_rcv0++;
                break;
        case 1:
                devdata->n_rcv1++;
                break;
        case 2:
                devdata->n_rcv2++;
                break;
        default:
                devdata->n_rcvx++;
                break;
        }
        for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
                for (i = 0; i < devdata->num_rcv_bufs; i++) {
                        if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
                                continue;

                        if ((skb) && devdata->rcvbuf[i] == skb) {
                                devdata->found_repost_rcvbuf_cnt++;
                                found_skb = 1;
                                devdata->repost_found_skb_cnt++;
                        }
                        devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
                        if (!devdata->rcvbuf[i]) {
                                devdata->num_rcv_bufs_could_not_alloc++;
                                devdata->alloc_failed_in_repost_rtn_cnt++;
                                status = -ENOMEM;
                                break;
                        }
                        status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
                        if (status) {
                                kfree_skb(devdata->rcvbuf[i]);
                                devdata->rcvbuf[i] = NULL;
                                break;
                        }
                        numreposted++;
                        break;
                }
        }
        if (numreposted != copy.numrcvbufs) {
                devdata->n_repost_deficit++;
                status = -EINVAL;
        }
        if (skb) {
                if (found_skb) {
                        kfree_skb(skb);
                } else {
                        status = -EINVAL;
                        devdata->bad_rcv_buf++;
                }
        }
        return status;
}

/*
 * visornic_rx - Handle receive packets coming back from IO Part
 * @cmdrsp: Receive packet returned from IO Part
 *
 * Got a receive packet back from the IO Part; handle it and send
 * it up the stack.
 * Returns 1 iff an skb was received, otherwise 0
 */
static int
visornic_rx(struct uiscmdrsp *cmdrsp)
{
        struct visornic_devdata *devdata;
        struct sk_buff *skb, *prev, *curr;
        struct net_device *netdev;
        int cc, currsize, off;
        struct ethhdr *eth;
        unsigned long flags;

        /* post new rcv buf to the other end using the cmdrsp we have at hand
         * post it without holding lock - but we'll use the signal lock to
         * synchronize the queue insert. The cmdrsp that contains the net.rcv
         * is the one we are using to repost, so copy the info we need from it.
         */
        skb = cmdrsp->net.buf;
        netdev = skb->dev;

        devdata = netdev_priv(netdev);

        spin_lock_irqsave(&devdata->priv_lock, flags);
        atomic_dec(&devdata->num_rcvbuf_in_iovm);

        /* set length to how much was ACTUALLY received -
         * NOTE: rcv_done_len includes actual length of data rcvd
         * including ethhdr
         */
        skb->len = cmdrsp->net.rcv.rcv_done_len;

        /* update rcv stats - call it with priv_lock held */
        devdata->net_stats.rx_packets++;
        devdata->net_stats.rx_bytes += skb->len;

        /* test enabled while holding lock */
        if (!(devdata->enabled && devdata->enab_dis_acked)) {
                /* don't process it unless we're in enable mode and until
                 * we've gotten an ACK saying the other end got our RCV enable
                 */
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                repost_return(cmdrsp, devdata, skb, netdev);
                return 0;
        }

        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* when skb was allocated, skb->dev, skb->data, skb->len and
         * skb->data_len were setup. AND, data has already been put into the
         * skb (both first frag and in frags pages)
         * NOTE: firstfragslen is the amount of data in skb->data and that
         * which is not in nr_frags or frag_list. This is now simply
         * RCVPOST_BUF_SIZE. bump tail to show how much data is in
         * firstfrag & set data_len to show rest; see if we have to chain
         * frag_list.
         */
        /* do PRECAUTIONARY check */
        if (skb->len > RCVPOST_BUF_SIZE) {
                if (cmdrsp->net.rcv.numrcvbufs < 2) {
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
                        return 0;
                }
                /* length rcvd is greater than firstfrag in this skb rcv buf */
                /* amount in skb->data */
                skb->tail += RCVPOST_BUF_SIZE;
                /* amount that will be in frag_list */
                skb->data_len = skb->len - RCVPOST_BUF_SIZE;
        } else {
                /* data fits in this skb - no chaining - do
                 * PRECAUTIONARY check
                 */
                /* should be 1 */
                if (cmdrsp->net.rcv.numrcvbufs != 1) {
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
                        return 0;
                }
                skb->tail += skb->len;
                /* nothing rcvd in frag_list */
                skb->data_len = 0;
        }
        off = skb_tail_pointer(skb) - skb->data;

        /* amount we bumped tail by in the head skb
         * it is used to calculate the size of each chained skb below
         * it is also used to index into bufline to continue the copy
         * (for chansocktwopc)
         * if necessary chain the rcv skbs together.
         * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
         * chain the rest to that one.
         * - do PRECAUTIONARY check
         */
        if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
                if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                        dev_err(&devdata->netdev->dev, "repost_return failed");
                return 0;
        }

        if (cmdrsp->net.rcv.numrcvbufs > 1) {
                /* chain the various rcv buffers into the skb's frag_list. */
                /* Note: off was initialized above */
                for (cc = 1, prev = NULL;
                     cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
                        curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
                        curr->next = NULL;
                        /* start of list - set head */
                        if (!prev)
                                skb_shinfo(skb)->frag_list = curr;
                        else
                                prev->next = curr;
                        prev = curr;

                        /* should we set skb->len and skb->data_len for each
                         * buffer being chained??? can't hurt!
                         */
                        currsize = min(skb->len - off,
                                       (unsigned int)RCVPOST_BUF_SIZE);
                        curr->len = currsize;
                        curr->tail += currsize;
                        curr->data_len = 0;
                        off += currsize;
                }
                /* assert skb->len == off */
                if (skb->len != off) {
                        netdev_err(devdata->netdev,
                                   "something wrong; skb->len:%d != off:%d\n",
                                   skb->len, off);
                }
        }

        /* set up packet's protocol type using ethernet header - this
         * sets up skb->pkt_type & it also PULLS out the eth header
         */
        skb->protocol = eth_type_trans(skb, netdev);

        eth = eth_hdr(skb);

        skb->csum = 0;
        skb->ip_summed = CHECKSUM_NONE;

        do {
                /* accept all packets */
                if (netdev->flags & IFF_PROMISC)
                        break;
                if (skb->pkt_type == PACKET_BROADCAST) {
                        /* accept all broadcast packets */
                        if (netdev->flags & IFF_BROADCAST)
                                break;
                } else if (skb->pkt_type == PACKET_MULTICAST) {
                        if ((netdev->flags & IFF_MULTICAST) &&
                            (netdev_mc_count(netdev))) {
                                struct netdev_hw_addr *ha;
                                int found_mc = 0;

                                /* only accept multicast packets that we can
                                 * find in our multicast address list
                                 */
                                netdev_for_each_mc_addr(ha, netdev) {
                                        if (ether_addr_equal(eth->h_dest,
                                                             ha->addr)) {
                                                found_mc = 1;
                                                break;
                                        }
                                }
                                /* accept pkt, dest matches a multicast addr */
                                if (found_mc)
                                        break;
                        }
                        /* accept packet, h_dest must match vnic mac address */
                } else if (skb->pkt_type == PACKET_HOST) {
                        break;
                } else if (skb->pkt_type == PACKET_OTHERHOST) {
                        /* something is not right */
                        dev_err(&devdata->netdev->dev,
                                "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
                                netdev->name, eth->h_dest, netdev->dev_addr);
                }
                /* drop packet - don't forward it up to OS */
                devdata->n_rcv_packets_not_accepted++;
                repost_return(cmdrsp, devdata, skb, netdev);
                return 0;
        } while (0);

        netif_receive_skb(skb);
        /* netif_receive_skb returns various values, but "in practice most
         * drivers ignore the return value"
         */

        skb = NULL;
        /* whether the packet got dropped or handled, the skb is freed by
         * kernel code, so we shouldn't free it. But we should repost a
         * new rcv buffer.
         */
        repost_return(cmdrsp, devdata, skb, netdev);
        return 1;
}

/*
 * devdata_initialize - Initialize devdata structure
 * @devdata: visornic_devdata structure to initialize
 * @dev: visorbus device it belongs to
 *
 * Setup initial values for the visornic based on channel and default
 * values.
 * Returns a pointer to the devdata structure
 */
static struct visornic_devdata *
devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
{
        devdata->dev = dev;
        devdata->incarnation_id = get_jiffies_64();
        return devdata;
}

/*
 * devdata_release - Frees up references in devdata
 * @devdata: struct to clean up
 *
 * Frees up references in devdata.
 * Returns void
 */
static void devdata_release(struct visornic_devdata *devdata)
{
        kfree(devdata->rcvbuf);
        kfree(devdata->cmdrsp_rcv);
        kfree(devdata->xmit_cmdrsp);
}

static const struct net_device_ops visornic_dev_ops = {
        .ndo_open = visornic_open,
        .ndo_stop = visornic_close,
        .ndo_start_xmit = visornic_xmit,
        .ndo_get_stats = visornic_get_stats,
        .ndo_change_mtu = visornic_change_mtu,
        .ndo_tx_timeout = visornic_xmit_timeout,
        .ndo_set_rx_mode = visornic_set_multi,
};

/* DebugFS code */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
                                 size_t len, loff_t *offset)
{
        ssize_t bytes_read = 0;
        int str_pos = 0;
        struct visornic_devdata *devdata;
        struct net_device *dev;
        char *vbuf;

        if (len > MAX_BUF)
                len = MAX_BUF;
        vbuf = kzalloc(len, GFP_KERNEL);
        if (!vbuf)
                return -ENOMEM;

        /* for each vnic channel dump out channel specific data */
        rcu_read_lock();
        for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
                /* Only consider netdevs that are visornic, and are open */
                if ((dev->netdev_ops != &visornic_dev_ops) ||
                    (!netif_queue_stopped(dev)))
                        continue;

                devdata = netdev_priv(dev);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     "netdev = %s (0x%p), MAC Addr %pM\n",
                                     dev->name,
                                     dev,
                                     dev->dev_addr);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     "VisorNic Dev Info = 0x%p\n", devdata);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " num_rcv_bufs = %d\n",
                                     devdata->num_rcv_bufs);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " max_outstanding_net_xmits = %lu\n",
                                     devdata->max_outstanding_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " upper_threshold_net_xmits = %lu\n",
                                     devdata->upper_threshold_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " lower_threshold_net_xmits = %lu\n",
                                     devdata->lower_threshold_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " queuefullmsg_logged = %d\n",
                                     devdata->queuefullmsg_logged);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_rcv = %lu\n",
                                     devdata->chstat.got_rcv);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_enbdisack = %lu\n",
                                     devdata->chstat.got_enbdisack);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_xmit_done = %lu\n",
                                     devdata->chstat.got_xmit_done);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.xmit_fail = %lu\n",
                                     devdata->chstat.xmit_fail);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_enbdis = %lu\n",
                                     devdata->chstat.sent_enbdis);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_promisc = %lu\n",
                                     devdata->chstat.sent_promisc);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_post = %lu\n",
                                     devdata->chstat.sent_post);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_post_failed = %lu\n",
                                     devdata->chstat.sent_post_failed);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_xmit = %lu\n",
                                     devdata->chstat.sent_xmit);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.reject_count = %lu\n",
                                     devdata->chstat.reject_count);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.extra_rcvbufs_sent = %lu\n",
                                     devdata->chstat.extra_rcvbufs_sent);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv0 = %lu\n", devdata->n_rcv0);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv1 = %lu\n", devdata->n_rcv1);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv2 = %lu\n", devdata->n_rcv2);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcvx = %lu\n", devdata->n_rcvx);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " num_rcvbuf_in_iovm = %d\n",
                                     atomic_read(&devdata->num_rcvbuf_in_iovm));
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " alloc_failed_in_if_needed_cnt = %lu\n",
                                     devdata->alloc_failed_in_if_needed_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " alloc_failed_in_repost_rtn_cnt = %lu\n",
                                     devdata->alloc_failed_in_repost_rtn_cnt);
                /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                 *                   " inner_loop_limit_reached_cnt = %lu\n",
                 *                   devdata->inner_loop_limit_reached_cnt);
                 */
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " found_repost_rcvbuf_cnt = %lu\n",
                                     devdata->found_repost_rcvbuf_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " repost_found_skb_cnt = %lu\n",
                                     devdata->repost_found_skb_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_repost_deficit = %lu\n",
                                     devdata->n_repost_deficit);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " bad_rcv_buf = %lu\n",
                                     devdata->bad_rcv_buf);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv_packets_not_accepted = %lu\n",
                                     devdata->n_rcv_packets_not_accepted);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_rcvd = %llu\n",
                                     devdata->interrupts_rcvd);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_notme = %llu\n",
                                     devdata->interrupts_notme);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_disabled = %llu\n",
                                     devdata->interrupts_disabled);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " busy_cnt = %llu\n",
                                     devdata->busy_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " flow_control_upper_hits = %llu\n",
                                     devdata->flow_control_upper_hits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " flow_control_lower_hits = %llu\n",
                                     devdata->flow_control_lower_hits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " netif_queue = %s\n",
                                     netif_queue_stopped(devdata->netdev) ?
                                     "stopped" : "running");
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " xmits_outstanding = %lu\n",
                                     devdata_xmits_outstanding(devdata));
        }
        rcu_read_unlock();
        bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
        kfree(vbuf);
        return bytes_read;
}

static struct dentry *visornic_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
        .read = info_debugfs_read,
};
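
/* Illustrative usage (assuming the module init code registers this under
 * a "visornic" debugfs directory with an entry named "info"; the names
 * are an assumption, as the registration is not shown in this excerpt):
 *
 *   cat /sys/kernel/debug/visornic/info
 *
 * dumps the counters formatted above for each matching visornic netdev.
 */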
1593
0bb10830 1594/*
68905a14
DK
1595 * send_rcv_posts_if_needed
1596 * @devdata: visornic device
1597 *
1598 * Send receive buffers to the IO Partition.
1599 * Returns void
1600 */
03156571 1601static int
68905a14
DK
1602send_rcv_posts_if_needed(struct visornic_devdata *devdata)
1603{
1604 int i;
1605 struct net_device *netdev;
1606 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1607 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
03156571 1608 int err;
68905a14
DK
1609
1610 /* don't do this until vnic is marked ready */
1611 if (!(devdata->enabled && devdata->enab_dis_acked))
03156571 1612 return 0;
68905a14
DK
1613
1614 netdev = devdata->netdev;
1615 rcv_bufs_allocated = 0;
1616 /* this code is trying to prevent getting stuck here forever,
1617 * but still retry it if you cant allocate them all this time.
1618 */
1619 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1620 while (cur_num_rcv_bufs_to_alloc > 0) {
1621 cur_num_rcv_bufs_to_alloc--;
1622 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1623 if (devdata->rcvbuf[i])
1624 continue;
1625 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1626 if (!devdata->rcvbuf[i]) {
1627 devdata->alloc_failed_in_if_needed_cnt++;
1628 break;
1629 }
1630 rcv_bufs_allocated++;
03156571
DK
1631 err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1632 if (err) {
1633 kfree_skb(devdata->rcvbuf[i]);
1634 devdata->rcvbuf[i] = NULL;
1635 break;
1636 }
68905a14
DK
1637 devdata->chstat.extra_rcvbufs_sent++;
1638 }
1639 }
1640 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
03156571 1641 return 0;
68905a14
DK
1642}
1643
0bb10830 1644/*
91678f37
TS
1645 * drain_resp_queue - drains and ignores all messages from the resp queue
1646 * @cmdrsp: io channel command response message
1647 * @devdata: visornic device to drain
1648 */
1649static void
1650drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
1651{
f621a968
DB
1652 while (!visorchannel_signalremove(devdata->dev->visorchannel,
1653 IOCHAN_FROM_IOPART,
1654 cmdrsp))
91678f37
TS
1655 ;
1656}

/*
 * service_resp_queue - drains the response queue
 * @cmdrsp: io channel command response message
 * @devdata: visornic device to drain
 * @rx_work_done: accumulates the number of packets received
 * @budget: NAPI budget; stop once this many packets have been processed
 *
 * Drain the response queue of any responses from the IO partition.
 * Process the responses as we get them.
 * Returns when the response queue is empty or the budget is consumed.
 */
static void
service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
		   int *rx_work_done, int budget)
{
	unsigned long flags;
	struct net_device *netdev;

	while (*rx_work_done < budget) {
		/* TODO: CLIENT ACQUIRE -- Don't really need this at the
		 * moment
		 */
		/* queue empty */
		if (visorchannel_signalremove(devdata->dev->visorchannel,
					      IOCHAN_FROM_IOPART,
					      cmdrsp))
			break;

		switch (cmdrsp->net.type) {
		case NET_RCV:
			devdata->chstat.got_rcv++;
			/* process incoming packet */
			*rx_work_done += visornic_rx(cmdrsp);
			break;
		case NET_XMIT_DONE:
			spin_lock_irqsave(&devdata->priv_lock, flags);
			devdata->chstat.got_xmit_done++;
			if (cmdrsp->net.xmtdone.xmt_done_result)
				devdata->chstat.xmit_fail++;
			/* only call queue wake if we stopped it */
			netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
			/* ASSERT netdev == vnicinfo->netdev; */
			if ((netdev == devdata->netdev) &&
			    netif_queue_stopped(netdev)) {
				/* check if we have crossed the lower watermark
				 * for netif_wake_queue()
				 */
				if (vnic_hit_low_watermark
				    (devdata,
				     devdata->lower_threshold_net_xmits)) {
					/* enough NET_XMITs completed
					 * so can restart netif queue
					 */
					netif_wake_queue(netdev);
					devdata->flow_control_lower_hits++;
				}
			}
			skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			kfree_skb(cmdrsp->net.buf);
			break;
		case NET_RCV_ENBDIS_ACK:
			devdata->chstat.got_enbdisack++;
			netdev = (struct net_device *)
				cmdrsp->net.enbdis.context;
			spin_lock_irqsave(&devdata->priv_lock, flags);
			devdata->enab_dis_acked = 1;
			spin_unlock_irqrestore(&devdata->priv_lock, flags);

			if (devdata->server_down &&
			    devdata->server_change_state) {
				/* Inform Linux that the link is up */
				devdata->server_down = false;
				devdata->server_change_state = false;
				netif_wake_queue(netdev);
				netif_carrier_on(netdev);
			}
			break;
		case NET_CONNECT_STATUS:
			netdev = devdata->netdev;
			if (cmdrsp->net.enbdis.enable == 1) {
				spin_lock_irqsave(&devdata->priv_lock, flags);
				devdata->enabled = cmdrsp->net.enbdis.enable;
				spin_unlock_irqrestore(&devdata->priv_lock,
						       flags);
				netif_wake_queue(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_stop_queue(netdev);
				netif_carrier_off(netdev);
				spin_lock_irqsave(&devdata->priv_lock, flags);
				devdata->enabled = cmdrsp->net.enbdis.enable;
				spin_unlock_irqrestore(&devdata->priv_lock,
						       flags);
			}
			break;
		default:
			break;
		}
		/* cmdrsp is now available for reuse */
	}
}
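
/* Flow-control note: the transmit path stops the netif queue once
 * outstanding xmits cross devdata->upper_threshold_net_xmits (counted in
 * flow_control_upper_hits); the NET_XMIT_DONE case above wakes it only
 * after completions fall below lower_threshold_net_xmits. The gap between
 * the two watermarks is deliberate hysteresis so the queue does not thrash
 * between stopped and running.
 */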

static int visornic_poll(struct napi_struct *napi, int budget)
{
	struct visornic_devdata *devdata = container_of(napi,
							struct visornic_devdata,
							napi);
	int rx_count = 0;
	int err;

	err = send_rcv_posts_if_needed(devdata);
	if (err)
		return err;

	service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);

	/* If there aren't any more packets to receive stop the poll */
	if (rx_count < budget)
		napi_complete_done(napi, rx_count);

	return rx_count;
}
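
/* NAPI contract: calling napi_complete_done() and returning fewer packets
 * than @budget tells the core we are idle, so this poll routine is not
 * invoked again until napi_schedule() fires (from poll_for_irq() below);
 * returning rx_count == budget keeps us on the poll list.
 */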

/*
 * poll_for_irq - checks the status of the response queue
 * @v: void pointer to the visornic devdata
 *
 * Timer callback that stands in for a channel interrupt: periodically
 * check the response queue and, if responses are pending, schedule the
 * NAPI poll to drain it. Re-arms itself until the device is removed.
 */
static void
poll_for_irq(unsigned long v)
{
	struct visornic_devdata *devdata = (struct visornic_devdata *)v;

	if (!visorchannel_signalempty(devdata->dev->visorchannel,
				      IOCHAN_FROM_IOPART))
		napi_schedule(&devdata->napi);

	atomic_set(&devdata->interrupt_rcvd, 0);

	/* mod_timer() takes an absolute expiry time */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
}
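
/* Sketch of the interrupt emulation: the channel is marked
 * VISOR_CHANNEL_IS_POLLING in visornic_probe(), so instead of a hardware
 * interrupt, irq_poll_timer re-fires roughly every 2 ms and a non-empty
 * IOCHAN_FROM_IOPART queue plays the role of the interrupt that schedules
 * NAPI.
 */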

/*
 * visornic_probe - probe function for visornic devices
 * @dev: The visor device discovered
 *
 * Called when visorbus discovers a visornic device on its
 * bus. It creates a new visornic ethernet adapter.
 * Returns 0 or negative for error.
 */
static int visornic_probe(struct visor_device *dev)
{
	struct visornic_devdata *devdata = NULL;
	struct net_device *netdev = NULL;
	int err;
	int channel_offset = 0;
	u64 features;

	netdev = alloc_etherdev(sizeof(struct visornic_devdata));
	if (!netdev) {
		dev_err(&dev->device,
			"%s alloc_etherdev failed\n", __func__);
		return -ENOMEM;
	}

	netdev->netdev_ops = &visornic_dev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	SET_NETDEV_DEV(netdev, &dev->device);

	/* Get MAC address from channel and read it into the device. */
	netdev->addr_len = ETH_ALEN;
	channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
				    ETH_ALEN);
	if (err < 0) {
		dev_err(&dev->device,
			"%s failed to get mac addr from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata = devdata_initialize(netdev_priv(netdev), dev);
	if (!devdata) {
		dev_err(&dev->device,
			"%s devdata_initialize failed\n", __func__);
		err = -ENOMEM;
		goto cleanup_netdev;
	}
	/* don't trust messages lying around in the channel */
	drain_resp_queue(devdata->cmdrsp, devdata);

	devdata->netdev = netdev;
	dev_set_drvdata(&dev->device, devdata);
	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->priv_lock);
	/* not yet */
	devdata->enabled = 0;
	atomic_set(&devdata->usage, 1);

	/* Setup rcv bufs */
	channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
	err = visorbus_read_channel(dev, channel_offset,
				    &devdata->num_rcv_bufs, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get #rcv bufs from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
				  sizeof(struct sk_buff *), GFP_KERNEL);
	if (!devdata->rcvbuf) {
		err = -ENOMEM;
		goto cleanup_netdev;
	}

	/* set the net_xmit outstanding threshold;
	 * always leave two slots open, but you should have 3 at a minimum.
	 * note that max_outstanding_net_xmits must be > 0
	 */
	devdata->max_outstanding_net_xmits =
		max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
	devdata->upper_threshold_net_xmits =
		max_t(unsigned long,
		      2, (devdata->max_outstanding_net_xmits - 1));
	devdata->lower_threshold_net_xmits =
		max_t(unsigned long,
		      1, (devdata->max_outstanding_net_xmits / 2));
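
	/* Worked example with illustrative numbers: if num_rcv_bufs == 64,
	 * max_outstanding_net_xmits = max(3, 64 / 3 - 2) = 19,
	 * upper_threshold_net_xmits = max(2, 19 - 1) = 18, and
	 * lower_threshold_net_xmits = max(1, 19 / 2) = 9: the queue stops
	 * at 18 outstanding transmits and wakes once completions reach 9.
	 */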

	skb_queue_head_init(&devdata->xmitbufhead);

	/* create a cmdrsp we can use to post and unpost rcv buffers */
	devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->cmdrsp_rcv) {
		err = -ENOMEM;
		goto cleanup_rcvbuf;
	}
	devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->xmit_cmdrsp) {
		err = -ENOMEM;
		goto cleanup_cmdrsp_rcv;
	}
	INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
	devdata->server_down = false;
	devdata->server_change_state = false;

	/* set the default mtu */
	channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get mtu from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	/* TODO: Setup Interrupt information */
	/* Let's start our napi poll to get responses */
	netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);

	setup_timer(&devdata->irq_poll_timer, poll_for_irq,
		    (unsigned long)devdata);
	/* Note: This timer has to start running before the while
	 * loop below because the napi routine is responsible for
	 * setting enab_dis_acked
	 */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));

	channel_offset = offsetof(struct visor_io_channel,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get features from chan (%d)\n",
			__func__, err);
		goto cleanup_napi_add;
	}

	features |= VISOR_CHANNEL_IS_POLLING;
	features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to set features in chan (%d)\n",
			__func__, err);
		goto cleanup_napi_add;
	}

	/* Note: Interrupts have to be enabled before the while
	 * loop below because the napi routine is responsible for
	 * setting enab_dis_acked
	 */
	visorbus_enable_channel_interrupts(dev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&dev->device,
			"%s register_netdev failed (%d)\n", __func__, err);
		goto cleanup_napi_add;
	}

	/* create debug/sysfs directories */
	devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
						      visornic_debugfs_dir);
	if (!devdata->eth_debugfs_dir) {
		dev_err(&dev->device,
			"%s debugfs_create_dir %s failed\n",
			__func__, netdev->name);
		err = -ENOMEM;
		goto cleanup_register_netdev;
	}

	dev_info(&dev->device, "%s success netdev=%s\n",
		 __func__, netdev->name);
	return 0;

cleanup_register_netdev:
	unregister_netdev(netdev);

cleanup_napi_add:
	del_timer_sync(&devdata->irq_poll_timer);
	netif_napi_del(&devdata->napi);

cleanup_xmit_cmdrsp:
	kfree(devdata->xmit_cmdrsp);

cleanup_cmdrsp_rcv:
	kfree(devdata->cmdrsp_rcv);

cleanup_rcvbuf:
	kfree(devdata->rcvbuf);

cleanup_netdev:
	free_netdev(netdev);
	return err;
}
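
/* The cleanup labels above unwind in reverse order of setup, so each
 * failure path frees exactly what had been set up by that point; e.g. a
 * register_netdev() failure enters at cleanup_napi_add and skips
 * cleanup_register_netdev.
 */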

/*
 * host_side_disappeared - IO part is gone.
 * @devdata: device object
 *
 * IO partition servicing this device is gone; do cleanup.
 * Returns void.
 */
static void host_side_disappeared(struct visornic_devdata *devdata)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	/* indicate device destroyed */
	devdata->dev = NULL;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/*
 * visornic_remove - Called when visornic dev goes away
 * @dev: visornic device that is being removed
 *
 * Called when DEVICE_DESTROY gets called to remove device.
 * Returns void.
 */
static void visornic_remove(struct visor_device *dev)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
	struct net_device *netdev;
	unsigned long flags;

	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return;
	}
	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->going_away) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s already being removed\n", __func__);
		return;
	}
	devdata->going_away = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
	netdev = devdata->netdev;
	if (!netdev) {
		dev_err(&dev->device, "%s no net device\n", __func__);
		return;
	}

	/* going_away prevents new items being added to the workqueues */
	cancel_work_sync(&devdata->timeout_reset);

	debugfs_remove_recursive(devdata->eth_debugfs_dir);
	/* this will call visornic_close() */
	unregister_netdev(netdev);

	del_timer_sync(&devdata->irq_poll_timer);
	netif_napi_del(&devdata->napi);

	dev_set_drvdata(&dev->device, NULL);
	host_side_disappeared(devdata);
	devdata_release(devdata);
	free_netdev(netdev);
}
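
/* Teardown ordering note: unregister_netdev() (which closes the device)
 * runs before the poll timer and NAPI context are deleted, and
 * free_netdev() must come last because devdata lives in the netdev's
 * private area (see netdev_priv() in visornic_probe()).
 */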

/*
 * visornic_pause - Called when IO Part disappears
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO Partition has gone down. Need to free
 * up resources and wait for the IO partition to come back. Mark
 * the link as down and don't attempt any DMA. When we have freed
 * memory, call the complete_func so that Command knows we are
 * done. If we don't call complete_func, the IO part will never
 * come back.
 * Returns 0 for success.
 */
static int visornic_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);

	visornic_serverdown(devdata, complete_func);
	return 0;
}

/*
 * visornic_resume - Called when IO part has recovered
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO partition has recovered. Reestablish
 * connection to the IO part and set the link up. Okay to do
 * DMA again.
 * Returns 0 for success.
 */
static int visornic_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata;
	struct net_device *netdev;
	unsigned long flags;

	devdata = dev_get_drvdata(&dev->device);
	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return -EINVAL;
	}

	netdev = devdata->netdev;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->server_change_state) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s server already changing state\n",
			__func__);
		return -EINVAL;
	}
	if (!devdata->server_down) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s server not down\n", __func__);
		complete_func(dev, 0);
		return 0;
	}
	devdata->server_change_state = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* Must transition channel to ATTACHED state BEFORE
	 * we can start using the device again.
	 * TODO: State transitions
	 */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));

	rtnl_lock();
	dev_open(netdev);
	rtnl_unlock();

	complete_func(dev, 0);
	return 0;
}
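
/* Pause/resume pairing: visornic_pause() marks the server down through
 * visornic_serverdown(), and visornic_resume() bails out unless
 * server_down is set, so the two callbacks always run as a matched pair
 * around an IO-partition outage.
 */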

/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we
 * support is attached or removed.
 */
static struct visor_driver visornic_driver = {
	.name = "visornic",
	.owner = THIS_MODULE,
	.channel_types = visornic_channel_types,
	.probe = visornic_probe,
	.remove = visornic_remove,
	.pause = visornic_pause,
	.resume = visornic_resume,
	.channel_interrupt = NULL,
};

/*
 * visornic_init - Init function
 *
 * Init function for the visornic driver. Do initial driver setup
 * and wait for devices.
 * Returns 0 for success, negative for error.
 */
static int visornic_init(void)
{
	struct dentry *ret;
	int err = -ENOMEM;

	visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
	if (!visornic_debugfs_dir)
		return err;

	ret = debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
				  &debugfs_info_fops);
	if (!ret)
		goto cleanup_debugfs;
	ret = debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir,
				  NULL, &debugfs_enable_ints_fops);
	if (!ret)
		goto cleanup_debugfs;

	err = visorbus_register_visor_driver(&visornic_driver);
	if (err)
		goto cleanup_debugfs;

	return 0;

cleanup_debugfs:
	debugfs_remove_recursive(visornic_debugfs_dir);

	return err;
}
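
/* After a successful init the driver exposes two debugfs nodes (paths
 * assume the conventional debugfs mount point):
 *
 *   /sys/kernel/debug/visornic/info        (0400, stats dump)
 *   /sys/kernel/debug/visornic/enable_ints (0200, write-only control)
 *
 * plus a per-netdev subdirectory created later by visornic_probe().
 */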

/*
 * visornic_cleanup - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visornic_cleanup(void)
{
	visorbus_unregister_visor_driver(&visornic_driver);

	debugfs_remove_recursive(visornic_debugfs_dir);
}

module_init(visornic_init);
module_exit(visornic_cleanup);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");