/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

/* This driver lives in an s-Par partition and registers to Ethernet IO
 * channels from the visorbus driver. It creates netdev devices, forwards
 * transmits to the IO channel, and accepts receives from the IO
 * Partition via the IO channel.
 */

#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include "visorbus.h"
#include "iochannel.h"

#define VISORNIC_INFINITE_RSP_WAIT 0

/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
 *         = 163840 bytes
 */
#define MAX_BUF 163840
#define NAPI_WEIGHT 64

/* GUIDs for the channel type supported by this driver. */
/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
#define VISOR_VNIC_CHANNEL_GUID \
        GUID_INIT(0x8cd5994d, 0xc58e, 0x11da, \
                  0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
#define VISOR_VNIC_CHANNEL_GUID_STR \
        "8cd5994d-c58e-11da-95a9-00e08161165f"

static struct visor_channeltype_descriptor visornic_channel_types[] = {
        /* Note that the only channel type we expect to be reported by the
         * bus driver is the VISOR_VNIC channel.
         */
        { VISOR_VNIC_CHANNEL_GUID, "ultravnic" },
        {}
};
MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
/* FIXME XXX: This next line of code must be fixed and removed before
 * acceptance into the 'normal' part of the kernel. It is only here as a
 * placeholder to get module autoloading functionality working for visorbus.
 * Code must be added to scripts/mod/file2alias.c, etc., to get this working
 * properly.
 */
MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_GUID_STR);

struct chanstat {
        unsigned long got_rcv;
        unsigned long got_enbdisack;
        unsigned long got_xmit_done;
        unsigned long xmit_fail;
        unsigned long sent_enbdis;
        unsigned long sent_promisc;
        unsigned long sent_post;
        unsigned long sent_post_failed;
        unsigned long sent_xmit;
        unsigned long reject_count;
        unsigned long extra_rcvbufs_sent;
};

/* struct visornic_devdata
 * @enabled:                         0 disabled, 1 enabled to receive.
 * @enab_dis_acked:                  NET_RCV_ENABLE/DISABLE acked by IOPART.
 * @struct *dev:
 * @struct *netdev:
 * @struct net_stats:
 * @interrupt_rcvd:
 * @rsp_queue:
 * @struct **rcvbuf:
 * @incarnation_id:                  incarnation_id lets IOPART know about
 *                                   re-birth.
 * @old_flags:                       flags as they were prior to
 *                                   set_multicast_list.
 * @usage:                           count of users.
 * @num_rcv_bufs:                    number of rcv buffers the vnic will post.
 * @num_rcv_bufs_could_not_alloc:
 * @num_rcvbuf_in_iovm:
 * @alloc_failed_in_if_needed_cnt:
 * @alloc_failed_in_repost_rtn_cnt:
 * @max_outstanding_net_xmits:       absolute max number of outstanding xmits
 *                                   - should never hit this.
 * @upper_threshold_net_xmits:       high water mark for calling
 *                                   netif_stop_queue().
 * @lower_threshold_net_xmits:       low water mark for calling
 *                                   netif_wake_queue().
 * @struct xmitbufhead:              xmitbufhead - head of the xmit buffer list
 *                                   sent to the IOPART end.
 * @server_down_complete_func:
 * @struct timeout_reset:
 * @struct *cmdrsp_rcv:              cmdrsp_rcv is used for posting/unposting
 *                                   rcv buffers.
 * @struct *xmit_cmdrsp:             xmit_cmdrsp - issues NET_XMIT - only one
 *                                   active xmit at a time.
 * @server_down:                     IOPART is down.
 * @server_change_state:             Processing SERVER_CHANGESTATE msg.
 * @going_away:                      device is being torn down.
 * @struct *eth_debugfs_dir:
 * @interrupts_rcvd:
 * @interrupts_notme:
 * @interrupts_disabled:
 * @busy_cnt:
 * @priv_lock:                       spinlock to access devdata structures.
 * @flow_control_upper_hits:
 * @flow_control_lower_hits:
 * @n_rcv0:                          # rcvs of 0 buffers.
 * @n_rcv1:                          # rcvs of 1 buffers.
 * @n_rcv2:                          # rcvs of 2 buffers.
 * @n_rcvx:                          # rcvs of >2 buffers.
 * @found_repost_rcvbuf_cnt:         # repost_rcvbuf_cnt.
 * @repost_found_skb_cnt:            # of times we found the skb.
 * @n_repost_deficit:                # of lost rcv buffers.
 * @bad_rcv_buf:                     # of unknown rcv skbs not freed.
 * @n_rcv_packets_not_accepted:      # bogus rcv packets rejected.
 * @queuefullmsg_logged:
 * @struct chstat:
 * @struct irq_poll_timer:
 * @struct napi:
 * @struct cmdrsp:
 */
struct visornic_devdata {
        unsigned short enabled;
        unsigned short enab_dis_acked;

        struct visor_device *dev;
        struct net_device *netdev;
        struct net_device_stats net_stats;
        atomic_t interrupt_rcvd;
        wait_queue_head_t rsp_queue;
        struct sk_buff **rcvbuf;
        u64 incarnation_id;
        unsigned short old_flags;
        atomic_t usage;

        int num_rcv_bufs;
        int num_rcv_bufs_could_not_alloc;
        atomic_t num_rcvbuf_in_iovm;
        unsigned long alloc_failed_in_if_needed_cnt;
        unsigned long alloc_failed_in_repost_rtn_cnt;

        unsigned long max_outstanding_net_xmits;
        unsigned long upper_threshold_net_xmits;
        unsigned long lower_threshold_net_xmits;
        struct sk_buff_head xmitbufhead;

        visorbus_state_complete_func server_down_complete_func;
        struct work_struct timeout_reset;
        struct uiscmdrsp *cmdrsp_rcv;
        struct uiscmdrsp *xmit_cmdrsp;
        bool server_down;
        bool server_change_state;
        bool going_away;
        struct dentry *eth_debugfs_dir;
        u64 interrupts_rcvd;
        u64 interrupts_notme;
        u64 interrupts_disabled;
        u64 busy_cnt;
        /* spinlock to access devdata structures. */
        spinlock_t priv_lock;

        /* flow control counter */
        u64 flow_control_upper_hits;
        u64 flow_control_lower_hits;

        /* debug counters */
        unsigned long n_rcv0;
        unsigned long n_rcv1;
        unsigned long n_rcv2;
        unsigned long n_rcvx;
        unsigned long found_repost_rcvbuf_cnt;
        unsigned long repost_found_skb_cnt;
        unsigned long n_repost_deficit;
        unsigned long bad_rcv_buf;
        unsigned long n_rcv_packets_not_accepted;

        int queuefullmsg_logged;
        struct chanstat chstat;
        struct timer_list irq_poll_timer;
        struct napi_struct napi;
        struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};

/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u16 inp_len,
                                u16 index, u16 max_pi_arr_entries,
                                struct phys_info pi_arr[])
{
        u16 i, len, firstlen;

        firstlen = PI_PAGE_SIZE - inp_off;
        if (inp_len <= firstlen) {
                /* The input entry spans only one page - add as is. */
                if (index >= max_pi_arr_entries)
                        return 0;
                pi_arr[index].pi_pfn = inp_pfn;
                pi_arr[index].pi_off = (u16)inp_off;
                pi_arr[index].pi_len = (u16)inp_len;
                return index + 1;
        }

        /* This entry spans multiple pages. */
        for (len = inp_len, i = 0; len;
             len -= pi_arr[index + i].pi_len, i++) {
                if (index + i >= max_pi_arr_entries)
                        return 0;
                pi_arr[index + i].pi_pfn = inp_pfn + i;
                if (i == 0) {
                        pi_arr[index].pi_off = inp_off;
                        pi_arr[index].pi_len = firstlen;
                } else {
                        pi_arr[index + i].pi_off = 0;
                        pi_arr[index + i].pi_len = min_t(u16, len,
                                                         PI_PAGE_SIZE);
                }
        }
        return index + i;
}

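/* Illustrative sketch (not part of the driver): assuming PI_PAGE_SIZE is
 * 4096, a 6000-byte input at page offset 512 spans two pages, so
 * add_physinfo_entries() fills two entries:
 *   pi_arr[index]     = { inp_pfn,     512, 3584 }   (firstlen = 4096 - 512)
 *   pi_arr[index + 1] = { inp_pfn + 1,   0, 2416 }   (the remaining bytes)
 * and returns index + 2.
 */
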
/* visor_copy_fragsinfo_from_skb - copy fragment list in the SKB to a phys_info
 *                                 array that the IOPART understands
 * @skb:          Skbuff that we are pulling the frags from.
 * @firstfraglen: Length of first fragment in skb.
 * @frags_max:    Max len of frags array.
 * @frags:        Frags array filled in on output.
 *
 * Return: Positive integer indicating number of entries filled in frags on
 *         success, negative integer on error.
 */
static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
                                         unsigned int firstfraglen,
                                         unsigned int frags_max,
                                         struct phys_info frags[])
{
        unsigned int count = 0, frag, size, offset = 0, numfrags;
        unsigned int total_count;

        numfrags = skb_shinfo(skb)->nr_frags;

        /* Compute the number of fragments this skb has, and if it's more
         * than the frag array can hold, linearize the skb.
         */
        total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
        if (firstfraglen % PI_PAGE_SIZE)
                total_count++;

        if (total_count > frags_max) {
                if (skb_linearize(skb))
                        return -EINVAL;
                numfrags = skb_shinfo(skb)->nr_frags;
                firstfraglen = 0;
        }

        while (firstfraglen) {
                if (count == frags_max)
                        return -EINVAL;

                frags[count].pi_pfn =
                        page_to_pfn(virt_to_page(skb->data + offset));
                frags[count].pi_off =
                        (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
                size = min_t(unsigned int, firstfraglen,
                             PI_PAGE_SIZE - frags[count].pi_off);

                /* can take smallest of firstfraglen (what's left) OR
                 * bytes left in the page
                 */
                frags[count].pi_len = size;
                firstfraglen -= size;
                offset += size;
                count++;
        }
        if (numfrags) {
                if ((count + numfrags) > frags_max)
                        return -EINVAL;

                for (frag = 0; frag < numfrags; frag++) {
                        count = add_physinfo_entries(page_to_pfn(
                                  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
                                  skb_shinfo(skb)->frags[frag].page_offset,
                                  skb_shinfo(skb)->frags[frag].size, count,
                                  frags_max, frags);
                        /* add_physinfo_entries only returns zero if the
                         * frags array is out of room. That should never
                         * happen because we fail above, if
                         * count + numfrags > frags_max.
                         */
                        if (!count)
                                return -EINVAL;
                }
        }
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *skbinlist;
                int c;

                for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
                     skbinlist = skbinlist->next) {
                        c = visor_copy_fragsinfo_from_skb(skbinlist,
                                                          skbinlist->len -
                                                          skbinlist->data_len,
                                                          frags_max - count,
                                                          &frags[count]);
                        if (c < 0)
                                return c;
                        count += c;
                }
        }
        return count;
}

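/* Illustrative sketch: for an skb with 3 page frags and a 5000-byte linear
 * area (again assuming PI_PAGE_SIZE is 4096), total_count above works out to
 * 3 + (5000 / 4096) + 1 = 5 because 5000 % 4096 is non-zero; if frags_max
 * were only 4, the skb would be linearized before any entries are copied.
 */
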
static ssize_t enable_ints_write(struct file *file,
                                 const char __user *buffer,
                                 size_t count, loff_t *ppos)
{
        /* Don't want to break ABI here by having a debugfs
         * file that no longer exists or is writable, so
         * let's just make this a vestigial function.
         */
        return count;
}

static const struct file_operations debugfs_enable_ints_fops = {
        .write = enable_ints_write,
};

/* visornic_serverdown_complete - pause device following IOPART going down
 * @devdata: Device managed by IOPART.
 *
 * The IO partition has gone down, and we need to do some cleanup for when it
 * comes back. Treat the IO partition as the link being down.
 */
static void visornic_serverdown_complete(struct visornic_devdata *devdata)
{
        struct net_device *netdev = devdata->netdev;

        /* Stop polling for interrupts */
        del_timer_sync(&devdata->irq_poll_timer);

        rtnl_lock();
        dev_close(netdev);
        rtnl_unlock();

        atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
        devdata->chstat.sent_xmit = 0;
        devdata->chstat.got_xmit_done = 0;

        if (devdata->server_down_complete_func)
                (*devdata->server_down_complete_func)(devdata->dev, 0);

        devdata->server_down = true;
        devdata->server_change_state = false;
        devdata->server_down_complete_func = NULL;
}

/* visornic_serverdown - Command has notified us that IOPART is down
 * @devdata:       Device managed by IOPART.
 * @complete_func: Function to call when finished.
 *
 * Schedule the work needed to handle the server down request. Make sure we
 * haven't already handled the server change state event.
 *
 * Return: 0 if we scheduled the work, negative integer on error.
 */
static int visornic_serverdown(struct visornic_devdata *devdata,
                               visorbus_state_complete_func complete_func)
{
        unsigned long flags;
        int err;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        if (devdata->server_change_state) {
                dev_dbg(&devdata->dev->device, "%s changing state\n",
                        __func__);
                err = -EINVAL;
                goto err_unlock;
        }
        if (devdata->server_down) {
                dev_dbg(&devdata->dev->device, "%s already down\n",
                        __func__);
                err = -EINVAL;
                goto err_unlock;
        }
        if (devdata->going_away) {
                dev_dbg(&devdata->dev->device,
                        "%s aborting because device removal pending\n",
                        __func__);
                err = -ENODEV;
                goto err_unlock;
        }
        devdata->server_change_state = true;
        devdata->server_down_complete_func = complete_func;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        visornic_serverdown_complete(devdata);
        return 0;

err_unlock:
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
        return err;
}

/* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition
 * @netdev: Network adapter the rcv bufs are attached to.
 *
 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
 * so that it can write rcv data into our memory space.
 *
 * Return: Pointer to sk_buff.
 */
static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
{
        struct sk_buff *skb;

        /* NOTE: the first fragment in each rcv buffer is pointed to by
         * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
         * in length, so the first frag is large enough to hold 1514.
         */
        skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
        if (!skb)
                return NULL;
        skb->dev = netdev;
        /* current value of mtu doesn't come into play here; large
         * packets will just end up using multiple rcv buffers all of
         * same size.
         */
        skb->len = RCVPOST_BUF_SIZE;
        /* alloc_skb already zeroes it out; set it explicitly for clarity. */
        skb->data_len = 0;
        return skb;
}

/* post_skb - post a skb to the IO Partition
 * @cmdrsp:  Cmdrsp packet to be sent to the IO Partition.
 * @devdata: visornic_devdata to post the skb to.
 * @skb:     Skb to give to the IO partition.
 *
 * Return: 0 on success, negative integer on error.
 */
static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
                    struct sk_buff *skb)
{
        int err;

        cmdrsp->net.buf = skb;
        cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
        cmdrsp->net.rcvpost.frag.pi_off =
                (unsigned long)skb->data & PI_PAGE_MASK;
        cmdrsp->net.rcvpost.frag.pi_len = skb->len;
        cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;

        if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
                return -EINVAL;

        cmdrsp->net.type = NET_RCV_POST;
        cmdrsp->cmdtype = CMD_NET_TYPE;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        cmdrsp);
        if (err) {
                devdata->chstat.sent_post_failed++;
                return err;
        }

        atomic_inc(&devdata->num_rcvbuf_in_iovm);
        devdata->chstat.sent_post++;
        return 0;
}

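/* Illustrative sketch of the page-fit check above (PI_PAGE_SIZE of 4096 is
 * an assumption): a buffer whose data happens to start 3000 bytes into a
 * page can describe at most 1096 bytes in the single rcvpost fragment, so
 * a 2048-byte skb at that offset would be rejected with -EINVAL.
 */
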
/* send_enbdis - Send NET_RCV_ENBDIS to IO Partition
 * @netdev:  Netdevice we are enabling/disabling, used as context return value.
 * @state:   Enable = 1/disable = 0.
 * @devdata: Visornic device we are enabling/disabling.
 *
 * Send the enable/disable message to the IO Partition.
 *
 * Return: 0 on success, negative integer on error.
 */
static int send_enbdis(struct net_device *netdev, int state,
                       struct visornic_devdata *devdata)
{
        int err;

        devdata->cmdrsp_rcv->net.enbdis.enable = state;
        devdata->cmdrsp_rcv->net.enbdis.context = netdev;
        devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
        devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        devdata->cmdrsp_rcv);
        if (err)
                return err;
        devdata->chstat.sent_enbdis++;
        return 0;
}

/* visornic_disable_with_timeout - disable network adapter
 * @netdev:  netdevice to disable.
 * @timeout: Timeout to wait for disable.
 *
 * Disable the network adapter and inform the IO Partition that we are
 * disabled. Reclaim memory from rcv bufs.
 *
 * Return: 0 on success, negative integer on failure of IO Partition
 *         responding.
 */
static int visornic_disable_with_timeout(struct net_device *netdev,
                                         const int timeout)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);
        int i;
        unsigned long flags;
        int wait = 0;
        int err;

        /* send a msg telling the other end we are stopping incoming pkts */
        spin_lock_irqsave(&devdata->priv_lock, flags);
        devdata->enabled = 0;
        /* must wait for ack */
        devdata->enab_dis_acked = 0;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* send disable and wait for ack -- don't hold lock when sending
         * disable because if the queue is full, insert might sleep.
         * If an error occurs, don't wait for the timeout.
         */
        err = send_enbdis(netdev, 0, devdata);
        if (err)
                return err;

        /* wait for ack to arrive before we try to free rcv buffers
         * NOTE: the other end automatically unposts the rcv buffers
         * when it gets a disable.
         */
        spin_lock_irqsave(&devdata->priv_lock, flags);
        while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
               (wait < timeout)) {
                if (devdata->enab_dis_acked)
                        break;
                if (devdata->server_down || devdata->server_change_state) {
                        dev_dbg(&netdev->dev, "%s server went away\n",
                                __func__);
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                wait += schedule_timeout(msecs_to_jiffies(10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }

        /* Wait for usage to go to 1 (no other users) before freeing
         * rcv buffers
         */
        if (atomic_read(&devdata->usage) > 1) {
                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irqrestore(&devdata->priv_lock, flags);
                        schedule_timeout(msecs_to_jiffies(10));
                        spin_lock_irqsave(&devdata->priv_lock, flags);
                        /* stop waiting once we are the only user left */
                        if (atomic_read(&devdata->usage) == 1)
                                break;
                }
        }
        /* we've set enabled to 0, so we can give up the lock. */
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* stop the transmit queue so nothing more can be transmitted */
        netif_stop_queue(netdev);

        napi_disable(&devdata->napi);

        skb_queue_purge(&devdata->xmitbufhead);

        /* Free rcv buffers - other end has automatically unposted them on
         * disable
         */
        for (i = 0; i < devdata->num_rcv_bufs; i++) {
                if (devdata->rcvbuf[i]) {
                        kfree_skb(devdata->rcvbuf[i]);
                        devdata->rcvbuf[i] = NULL;
                }
        }

        return 0;
}

/* init_rcv_bufs - initialize receive buffers and send them to the IO Partition
 * @netdev:  struct netdevice.
 * @devdata: visornic_devdata.
 *
 * Allocate rcv buffers and post them to the IO Partition.
 *
 * Return: 0 on success, negative integer on failure.
 */
static int init_rcv_bufs(struct net_device *netdev,
                         struct visornic_devdata *devdata)
{
        int i, j, count, err;

        /* allocate fixed number of receive buffers to post to uisnic
         * post receive buffers after we've allocated a required amount
         */
        for (i = 0; i < devdata->num_rcv_bufs; i++) {
                devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
                /* if we failed to allocate one let us stop */
                if (!devdata->rcvbuf[i])
                        break;
        }
        /* couldn't even allocate one -- bail out */
        if (i == 0)
                return -ENOMEM;
        count = i;

        /* Ensure we can alloc 2/3rd of the requested number of buffers.
         * 2/3 is an arbitrary choice; used also in ndis init.c
         */
        if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
                /* free receive buffers we did alloc and then bail out */
                for (i = 0; i < count; i++) {
                        kfree_skb(devdata->rcvbuf[i]);
                        devdata->rcvbuf[i] = NULL;
                }
                return -ENOMEM;
        }

        /* post receive buffers to receive incoming input - without holding
         * lock - we've not enabled nor started the queue so there shouldn't
         * be any rcv or xmit activity
         */
        for (i = 0; i < count; i++) {
                err = post_skb(devdata->cmdrsp_rcv, devdata,
                               devdata->rcvbuf[i]);
                if (!err)
                        continue;

                /* Error handling -
                 * If we posted at least one skb, we should return success,
                 * but need to free the resources that we have not
                 * successfully posted.
                 */
                for (j = i; j < count; j++) {
                        kfree_skb(devdata->rcvbuf[j]);
                        devdata->rcvbuf[j] = NULL;
                }
                if (i == 0)
                        return err;
                break;
        }

        return 0;
}

/* visornic_enable_with_timeout - send enable to IO Partition
 * @netdev:  struct net_device.
 * @timeout: Time to wait for the ACK from the enable.
 *
 * Sends enable to the IOVM and posts receive buffers to it. Timeout is
 * defined in msecs (timeout of 0 specifies infinite wait).
 *
 * Return: 0 on success, negative integer on failure.
 */
static int visornic_enable_with_timeout(struct net_device *netdev,
                                        const int timeout)
{
        int err = 0;
        struct visornic_devdata *devdata = netdev_priv(netdev);
        unsigned long flags;
        int wait = 0;

        napi_enable(&devdata->napi);

        /* NOTE: the other end automatically unposts the rcv buffers when it
         * gets a disable.
         */
        err = init_rcv_bufs(netdev, devdata);
        if (err < 0) {
                dev_err(&netdev->dev,
                        "%s failed to init rcv bufs\n", __func__);
                return err;
        }

        spin_lock_irqsave(&devdata->priv_lock, flags);
        devdata->enabled = 1;
        devdata->enab_dis_acked = 0;

        /* now we're ready, let's send an ENB to uisnic but until we get
         * an ACK back from uisnic, we'll drop the packets
         */
        devdata->n_rcv_packets_not_accepted = 0;
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* send enable and wait for ack -- don't hold lock when sending
         * enable because if the queue is full, insert might sleep. If an
         * error occurs, error out.
         */
        err = send_enbdis(netdev, 1, devdata);
        if (err)
                return err;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
               (wait < timeout)) {
                if (devdata->enab_dis_acked)
                        break;
                if (devdata->server_down || devdata->server_change_state) {
                        dev_dbg(&netdev->dev, "%s server went away\n",
                                __func__);
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                wait += schedule_timeout(msecs_to_jiffies(10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }

        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        if (!devdata->enab_dis_acked) {
                dev_err(&netdev->dev, "%s missing ACK\n", __func__);
                return -EIO;
        }

        netif_start_queue(netdev);
        return 0;
}

/* visornic_timeout_reset - handle xmit timeout resets
 * @work: Work item that scheduled the work.
 *
 * Transmit timeouts are typically handled by resetting the device for our
 * virtual NIC; we will send a disable and enable to the IOVM. If it doesn't
 * respond, we will trigger a serverdown.
 */
static void visornic_timeout_reset(struct work_struct *work)
{
        struct visornic_devdata *devdata;
        struct net_device *netdev;
        int response = 0;

        devdata = container_of(work, struct visornic_devdata, timeout_reset);
        netdev = devdata->netdev;

        rtnl_lock();
        if (!netif_running(netdev)) {
                rtnl_unlock();
                return;
        }

        response = visornic_disable_with_timeout(netdev,
                                                 VISORNIC_INFINITE_RSP_WAIT);
        if (response)
                goto call_serverdown;

        response = visornic_enable_with_timeout(netdev,
                                                VISORNIC_INFINITE_RSP_WAIT);
        if (response)
                goto call_serverdown;

        rtnl_unlock();

        return;

call_serverdown:
        visornic_serverdown(devdata, NULL);
        rtnl_unlock();
}

/* visornic_open - enable the visornic device and mark the queue started
 * @netdev: netdevice to start.
 *
 * Enable the device and start the transmit queue.
 *
 * Return: 0 on success.
 */
static int visornic_open(struct net_device *netdev)
{
        visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
        return 0;
}

/* visornic_close - disable the visornic device and stop the queues
 * @netdev: netdevice to stop.
 *
 * Disable the device and stop the transmit queue.
 *
 * Return: 0 on success.
 */
static int visornic_close(struct net_device *netdev)
{
        visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
        return 0;
}

/* devdata_xmits_outstanding - compute outstanding xmits
 * @devdata: visornic_devdata for device
 *
 * Return: Long integer representing the number of outstanding xmits.
 */
static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
{
        if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
                return devdata->chstat.sent_xmit -
                        devdata->chstat.got_xmit_done;
        return (ULONG_MAX - devdata->chstat.got_xmit_done
                + devdata->chstat.sent_xmit + 1);
}

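/* Illustrative sketch of the wraparound branch above: if sent_xmit has
 * wrapped around to 5 while got_xmit_done is still ULONG_MAX - 2, the
 * outstanding count is (ULONG_MAX - (ULONG_MAX - 2)) + 5 + 1 = 8, which
 * matches the eight xmits actually in flight.
 */
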
/* vnic_hit_high_watermark
 * @devdata:        Indicates visornic device we are checking.
 * @high_watermark: Max num of unacked xmits we will tolerate before we will
 *                  start throttling.
 *
 * Return: True iff the number of unacked xmits sent to the IO Partition is >=
 *         high_watermark. False otherwise.
 */
static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
                                    ulong high_watermark)
{
        return (devdata_xmits_outstanding(devdata) >= high_watermark);
}

/* vnic_hit_low_watermark
 * @devdata:       Indicates visornic device we are checking.
 * @low_watermark: We will wait until the num of unacked xmits drops to this
 *                 value or lower before we start transmitting again.
 *
 * Return: True iff the number of unacked xmits sent to the IO Partition is <=
 *         low_watermark.
 */
static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
                                   ulong low_watermark)
{
        return (devdata_xmits_outstanding(devdata) <= low_watermark);
}

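/* A minimal sketch (hypothetical helper, not used by the driver) of the
 * flow-control decision that the xmit and completion paths make with the
 * helpers above; visornic_xmit() stops the queue at the upper threshold
 * and service_resp_queue() wakes it at the lower one:
 *
 *      static void sketch_flow_control(struct visornic_devdata *devdata)
 *      {
 *              if (vnic_hit_high_watermark(devdata,
 *                                          devdata->upper_threshold_net_xmits))
 *                      netif_stop_queue(devdata->netdev);
 *              else if (vnic_hit_low_watermark(devdata,
 *                                              devdata->lower_threshold_net_xmits))
 *                      netif_wake_queue(devdata->netdev);
 *      }
 */
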
/* visornic_xmit - send a packet to the IO Partition
 * @skb:    Packet to be sent.
 * @netdev: Net device the packet is being sent from.
 *
 * Convert the skb to a cmdrsp so the IO Partition can understand it, and send
 * the XMIT command to the IO Partition for processing. This function is
 * protected from concurrent calls by a spinlock xmit_lock in the net_device
 * struct. As soon as the function returns, it can be called again.
 *
 * Return: NETDEV_TX_OK.
 */
static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct visornic_devdata *devdata;
        int len, firstfraglen, padlen;
        struct uiscmdrsp *cmdrsp = NULL;
        unsigned long flags;
        int err;

        devdata = netdev_priv(netdev);
        spin_lock_irqsave(&devdata->priv_lock, flags);

        if (netif_queue_stopped(netdev) || devdata->server_down ||
            devdata->server_change_state) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - queue stopped\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* sk_buff struct is used to host network data throughout all the
         * linux network subsystems
         */
        len = skb->len;

        /* skb->len is the FULL length of data (including fragmentary portion)
         * skb->data_len is the length of the fragment portion in frags
         * skb->len - skb->data_len is size of the 1st fragment in skb->data
         * calculate the length of the first fragment that skb->data is
         * pointing to
         */
        firstfraglen = skb->len - skb->data_len;
        if (firstfraglen < ETH_HLEN) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_err(&netdev->dev,
                        "%s busy - first frag too small (%d)\n",
                        __func__, firstfraglen);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if ((len < ETH_MIN_PACKET_SIZE) &&
            ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
                /* pad the packet out to minimum size */
                padlen = ETH_MIN_PACKET_SIZE - len;
                memset(&skb->data[len], 0, padlen);
                skb->tail += padlen;
                skb->len += padlen;
                len += padlen;
                firstfraglen += padlen;
        }

        cmdrsp = devdata->xmit_cmdrsp;
        /* clear cmdrsp */
        memset(cmdrsp, 0, SIZEOF_CMDRSP);
        cmdrsp->net.type = NET_XMIT;
        cmdrsp->cmdtype = CMD_NET_TYPE;

        /* save the pointer to skb -- we'll need it for completion */
        cmdrsp->net.buf = skb;

        if (vnic_hit_high_watermark(devdata,
                                    devdata->max_outstanding_net_xmits)) {
                /* extra NET_XMITs queued over to IOVM - need to wait */
                devdata->chstat.reject_count++;
                if (!devdata->queuefullmsg_logged &&
                    ((devdata->chstat.reject_count & 0x3ff) == 1))
                        devdata->queuefullmsg_logged = 1;
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - waiting for iovm to catch up\n",
                        __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        if (devdata->queuefullmsg_logged)
                devdata->queuefullmsg_logged = 0;

        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                cmdrsp->net.xmt.lincsum.valid = 1;
                cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
                if (skb_transport_header(skb) > skb->data) {
                        cmdrsp->net.xmt.lincsum.hrawoff =
                                skb_transport_header(skb) - skb->data;
                        cmdrsp->net.xmt.lincsum.hrawoffv = 1;
                }
                if (skb_network_header(skb) > skb->data) {
                        cmdrsp->net.xmt.lincsum.nhrawoff =
                                skb_network_header(skb) - skb->data;
                        cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
                }
                cmdrsp->net.xmt.lincsum.csum = skb->csum;
        } else {
                cmdrsp->net.xmt.lincsum.valid = 0;
        }

        /* save off the length of the entire data packet */
        cmdrsp->net.xmt.len = len;

        /* copy ethernet header from first frag into cmdrsp
         * - everything else will be passed in frags & DMA'ed
         */
        memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);

        /* copy frags info - from skb->data we need to only provide access
         * beyond eth header
         */
        cmdrsp->net.xmt.num_frags =
                visor_copy_fragsinfo_from_skb(skb, firstfraglen,
                                              MAX_PHYS_INFO,
                                              cmdrsp->net.xmt.frags);
        if (cmdrsp->net.xmt.num_frags < 0) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_err(&netdev->dev,
                        "%s busy - copy frags failed\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART, cmdrsp);
        if (err) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
                dev_dbg(&netdev->dev,
                        "%s busy - signalinsert failed\n", __func__);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Track the skbs that have been sent to the IOVM for XMIT */
        skb_queue_head(&devdata->xmitbufhead, skb);

        /* update xmt stats */
        devdata->net_stats.tx_packets++;
        devdata->net_stats.tx_bytes += skb->len;
        devdata->chstat.sent_xmit++;

        /* check if we have hit the high watermark for netif_stop_queue() */
        if (vnic_hit_high_watermark(devdata,
                                    devdata->upper_threshold_net_xmits)) {
                /* extra NET_XMITs queued over to IOVM - need to wait */
                /* stop queue - call netif_wake_queue() after lower threshold */
                netif_stop_queue(netdev);
                dev_dbg(&netdev->dev,
                        "%s busy - invoking iovm flow control\n",
                        __func__);
                devdata->flow_control_upper_hits++;
        }
        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* skb will be freed when we get back NET_XMIT_DONE */
        return NETDEV_TX_OK;
}

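/* Illustrative sketch of the padding logic above (ETH_MIN_PACKET_SIZE of 60
 * is an assumption from iochannel.h): a 42-byte ARP request gets padlen = 18
 * zero bytes appended, so len and firstfraglen both become 60 before the
 * cmdrsp is built.
 */
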
/* visornic_get_stats - returns net_stats of the visornic device
 * @netdev: netdevice.
 *
 * Return: Pointer to the net_device_stats struct for the device.
 */
static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);

        return &devdata->net_stats;
}

/* visornic_change_mtu - changes mtu of device
 * @netdev:  netdevice.
 * @new_mtu: Value of new mtu.
 *
 * The device's MTU cannot be changed by system; it must be changed via a
 * CONTROLVM message. All vnics and pnics in a switch have to have the same
 * MTU for everything to work. Currently not supported.
 *
 * Return: -EINVAL.
 */
static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
{
        return -EINVAL;
}

/* visornic_set_multi - set visornic device flags
 * @netdev: netdevice.
 *
 * The only flag we currently support is IFF_PROMISC.
 */
static void visornic_set_multi(struct net_device *netdev)
{
        struct uiscmdrsp *cmdrsp;
        struct visornic_devdata *devdata = netdev_priv(netdev);
        int err = 0;

        if (devdata->old_flags == netdev->flags)
                return;

        if ((netdev->flags & IFF_PROMISC) ==
            (devdata->old_flags & IFF_PROMISC))
                goto out_save_flags;

        cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
        if (!cmdrsp)
                return;
        cmdrsp->cmdtype = CMD_NET_TYPE;
        cmdrsp->net.type = NET_RCV_PROMISC;
        cmdrsp->net.enbdis.context = netdev;
        cmdrsp->net.enbdis.enable =
                netdev->flags & IFF_PROMISC;
        err = visorchannel_signalinsert(devdata->dev->visorchannel,
                                        IOCHAN_TO_IOPART,
                                        cmdrsp);
        kfree(cmdrsp);
        if (err)
                return;

out_save_flags:
        devdata->old_flags = netdev->flags;
}

/* visornic_xmit_timeout - request to timeout the xmit
 * @netdev: netdevice.
 *
 * Queue the work and return. Make sure we have not already been informed
 * that the IO Partition is gone; if so, we will have already timed-out
 * the xmits.
 */
static void visornic_xmit_timeout(struct net_device *netdev)
{
        struct visornic_devdata *devdata = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&devdata->priv_lock, flags);
        if (devdata->going_away) {
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                dev_dbg(&devdata->dev->device,
                        "%s aborting because device removal pending\n",
                        __func__);
                return;
        }

        /* Ensure that a ServerDown message hasn't been received */
        if (!devdata->enabled ||
            (devdata->server_down && !devdata->server_change_state)) {
                dev_dbg(&netdev->dev, "%s no processing\n",
                        __func__);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                return;
        }
        schedule_work(&devdata->timeout_reset);
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/* repost_return - repost rcv bufs that have come back
 * @cmdrsp:  IO channel command struct to post.
 * @devdata: Visornic devdata for the device.
 * @skb:     Socket buffer.
 * @netdev:  netdevice.
 *
 * Repost rcv buffers that have been returned to us when we are finished
 * with them.
 *
 * Return: 0 for success, negative integer on error.
 */
static int repost_return(struct uiscmdrsp *cmdrsp,
                         struct visornic_devdata *devdata,
                         struct sk_buff *skb, struct net_device *netdev)
{
        struct net_pkt_rcv copy;
        int i = 0, cc, numreposted;
        int found_skb = 0;
        int status = 0;

        copy = cmdrsp->net.rcv;
        switch (copy.numrcvbufs) {
        case 0:
                devdata->n_rcv0++;
                break;
        case 1:
                devdata->n_rcv1++;
                break;
        case 2:
                devdata->n_rcv2++;
                break;
        default:
                devdata->n_rcvx++;
                break;
        }
        for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
                for (i = 0; i < devdata->num_rcv_bufs; i++) {
                        if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
                                continue;

                        if ((skb) && devdata->rcvbuf[i] == skb) {
                                devdata->found_repost_rcvbuf_cnt++;
                                found_skb = 1;
                                devdata->repost_found_skb_cnt++;
                        }
                        devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
                        if (!devdata->rcvbuf[i]) {
                                devdata->num_rcv_bufs_could_not_alloc++;
                                devdata->alloc_failed_in_repost_rtn_cnt++;
                                status = -ENOMEM;
                                break;
                        }
                        status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
                        if (status) {
                                kfree_skb(devdata->rcvbuf[i]);
                                devdata->rcvbuf[i] = NULL;
                                break;
                        }
                        numreposted++;
                        break;
                }
        }
        if (numreposted != copy.numrcvbufs) {
                devdata->n_repost_deficit++;
                status = -EINVAL;
        }
        if (skb) {
                if (found_skb) {
                        kfree_skb(skb);
                } else {
                        status = -EINVAL;
                        devdata->bad_rcv_buf++;
                }
        }
        return status;
}

/* visornic_rx - handle receive packets coming back from IO Partition
 * @cmdrsp: Receive packet returned from IO Partition.
 *
 * Got a receive packet back from the IO Partition; handle it and send it up
 * the stack.
 *
 * Return: 1 iff an skb was received, otherwise 0.
 */
static int visornic_rx(struct uiscmdrsp *cmdrsp)
{
        struct visornic_devdata *devdata;
        struct sk_buff *skb, *prev, *curr;
        struct net_device *netdev;
        int cc, currsize, off;
        struct ethhdr *eth;
        unsigned long flags;

        /* post new rcv buf to the other end using the cmdrsp we have at hand
         * post it without holding lock - but we'll use the signal lock to
         * synchronize the queue insert. The cmdrsp that contains the net.rcv
         * is the one we are using to repost, so copy the info we need from it.
         */
        skb = cmdrsp->net.buf;
        netdev = skb->dev;

        devdata = netdev_priv(netdev);

        spin_lock_irqsave(&devdata->priv_lock, flags);
        atomic_dec(&devdata->num_rcvbuf_in_iovm);

        /* set length to how much was ACTUALLY received -
         * NOTE: rcv_done_len includes actual length of data rcvd
         * including ethhdr
         */
        skb->len = cmdrsp->net.rcv.rcv_done_len;

        /* update rcv stats - call it with priv_lock held */
        devdata->net_stats.rx_packets++;
        devdata->net_stats.rx_bytes += skb->len;

        /* test enabled while holding lock */
        if (!(devdata->enabled && devdata->enab_dis_acked)) {
                /* don't process it unless we're in enable mode and until
                 * we've gotten an ACK saying the other end got our RCV enable
                 */
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                repost_return(cmdrsp, devdata, skb, netdev);
                return 0;
        }

        spin_unlock_irqrestore(&devdata->priv_lock, flags);

        /* when skb was allocated, skb->dev, skb->data, skb->len and
         * skb->data_len were setup. AND, data has already been put into the
         * skb (both first frag and in frags pages)
         * NOTE: firstfragslen is the amount of data in skb->data and that
         * which is not in nr_frags or frag_list. This is now simply
         * RCVPOST_BUF_SIZE. bump tail to show how much data is in
         * firstfrag & set data_len to show rest see if we have to chain
         * frag_list.
         */
        /* do PRECAUTIONARY check */
        if (skb->len > RCVPOST_BUF_SIZE) {
                if (cmdrsp->net.rcv.numrcvbufs < 2) {
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
                        return 0;
                }
                /* length rcvd is greater than firstfrag in this skb rcv buf */
                /* amount in skb->data */
                skb->tail += RCVPOST_BUF_SIZE;
                /* amount that will be in frag_list */
                skb->data_len = skb->len - RCVPOST_BUF_SIZE;
        } else {
                /* data fits in this skb - no chaining - do
                 * PRECAUTIONARY check
                 */
                /* should be 1 */
                if (cmdrsp->net.rcv.numrcvbufs != 1) {
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
                        return 0;
                }
                skb->tail += skb->len;
                /* nothing rcvd in frag_list */
                skb->data_len = 0;
        }
        off = skb_tail_pointer(skb) - skb->data;

        /* amount we bumped tail by in the head skb
         * it is used to calculate the size of each chained skb below
         * it is also used to index into bufline to continue the copy
         * (for chansocktwopc)
         * if necessary chain the rcv skbs together.
         * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
         * chain the rest to that one.
         * - do PRECAUTIONARY check
         */
        if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
                if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                        dev_err(&devdata->netdev->dev, "repost_return failed");
                return 0;
        }

        if (cmdrsp->net.rcv.numrcvbufs > 1) {
                /* chain the various rcv buffers into the skb's frag_list. */
                /* Note: off was initialized above */
                for (cc = 1, prev = NULL;
                     cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
                        curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
                        curr->next = NULL;
                        /* start of list - set head */
                        if (!prev)
                                skb_shinfo(skb)->frag_list = curr;
                        else
                                prev->next = curr;
                        prev = curr;

                        /* should we set skb->len and skb->data_len for each
                         * buffer being chained??? can't hurt!
                         */
                        currsize = min(skb->len - off,
                                       (unsigned int)RCVPOST_BUF_SIZE);
                        curr->len = currsize;
                        curr->tail += currsize;
                        curr->data_len = 0;
                        off += currsize;
                }
                /* assert skb->len == off */
                if (skb->len != off) {
                        netdev_err(devdata->netdev,
                                   "something wrong; skb->len:%d != off:%d\n",
                                   skb->len, off);
                }
        }

        /* set up packet's protocol type using ethernet header - this
         * sets up skb->pkt_type & it also PULLS out the eth header
         */
        skb->protocol = eth_type_trans(skb, netdev);
        eth = eth_hdr(skb);
        skb->csum = 0;
        skb->ip_summed = CHECKSUM_NONE;

        do {
                /* accept all packets */
                if (netdev->flags & IFF_PROMISC)
                        break;
                if (skb->pkt_type == PACKET_BROADCAST) {
                        /* accept all broadcast packets */
                        if (netdev->flags & IFF_BROADCAST)
                                break;
                } else if (skb->pkt_type == PACKET_MULTICAST) {
                        if ((netdev->flags & IFF_MULTICAST) &&
                            (netdev_mc_count(netdev))) {
                                struct netdev_hw_addr *ha;
                                int found_mc = 0;

                                /* only accept multicast packets that we can
                                 * find in our multicast address list
                                 */
                                netdev_for_each_mc_addr(ha, netdev) {
                                        if (ether_addr_equal(eth->h_dest,
                                                             ha->addr)) {
                                                found_mc = 1;
                                                break;
                                        }
                                }
                                /* accept pkt, dest matches a multicast addr */
                                if (found_mc)
                                        break;
                        }
                /* accept packet, h_dest must match vnic mac address */
                } else if (skb->pkt_type == PACKET_HOST) {
                        break;
                } else if (skb->pkt_type == PACKET_OTHERHOST) {
                        /* something is not right */
                        dev_err(&devdata->netdev->dev,
                                "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
                                netdev->name, eth->h_dest, netdev->dev_addr);
                }
                /* drop packet - don't forward it up to OS */
                devdata->n_rcv_packets_not_accepted++;
                repost_return(cmdrsp, devdata, skb, netdev);
                return 0;
        } while (0);

        netif_receive_skb(skb);
        /* netif_receive_skb returns various values, but in practice most
         * drivers ignore the return value
         */

        skb = NULL;
        /* whether the packet got dropped or handled, the skb is freed by
         * kernel code, so we shouldn't free it. but we should repost a
         * new rcv buffer.
         */
        repost_return(cmdrsp, devdata, skb, netdev);
        return 1;
}

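/* Illustrative sketch of the chaining above: a 10000-byte receive with
 * RCVPOST_BUF_SIZE assumed to be 4096 arrives in three rcv buffers; the
 * head skb keeps the first 4096 bytes and data_len becomes 5904, while the
 * two chained buffers carry 4096 and 1808 bytes respectively, at which
 * point off equals skb->len.
 */
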
/* devdata_initialize - initialize devdata structure
 * @devdata: visornic_devdata structure to initialize.
 * @dev:     visorbus_device it belongs to.
 *
 * Setup initial values for the visornic, based on channel and default values.
 *
 * Return: A pointer to the devdata structure.
 */
static struct visornic_devdata *devdata_initialize(
                                        struct visornic_devdata *devdata,
                                        struct visor_device *dev)
{
        devdata->dev = dev;
        devdata->incarnation_id = get_jiffies_64();
        return devdata;
}

/* devdata_release - free up references in devdata
 * @devdata: Struct to clean up.
 */
static void devdata_release(struct visornic_devdata *devdata)
{
        kfree(devdata->rcvbuf);
        kfree(devdata->cmdrsp_rcv);
        kfree(devdata->xmit_cmdrsp);
}

static const struct net_device_ops visornic_dev_ops = {
        .ndo_open = visornic_open,
        .ndo_stop = visornic_close,
        .ndo_start_xmit = visornic_xmit,
        .ndo_get_stats = visornic_get_stats,
        .ndo_change_mtu = visornic_change_mtu,
        .ndo_tx_timeout = visornic_xmit_timeout,
        .ndo_set_rx_mode = visornic_set_multi,
};

/* DebugFS code */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
                                 size_t len, loff_t *offset)
{
        ssize_t bytes_read = 0;
        int str_pos = 0;
        struct visornic_devdata *devdata;
        struct net_device *dev;
        char *vbuf;

        if (len > MAX_BUF)
                len = MAX_BUF;
        vbuf = kzalloc(len, GFP_KERNEL);
        if (!vbuf)
                return -ENOMEM;

        /* for each vnic channel dump out channel specific data */
        rcu_read_lock();
        for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
                /* Only consider netdevs that are visornic, and are open */
                if ((dev->netdev_ops != &visornic_dev_ops) ||
                    (!netif_queue_stopped(dev)))
                        continue;

                devdata = netdev_priv(dev);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     "netdev = %s (0x%p), MAC Addr %pM\n",
                                     dev->name,
                                     dev,
                                     dev->dev_addr);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     "VisorNic Dev Info = 0x%p\n", devdata);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " num_rcv_bufs = %d\n",
                                     devdata->num_rcv_bufs);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " max_outstanding_net_xmits = %lu\n",
                                     devdata->max_outstanding_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " upper_threshold_net_xmits = %lu\n",
                                     devdata->upper_threshold_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " lower_threshold_net_xmits = %lu\n",
                                     devdata->lower_threshold_net_xmits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " queuefullmsg_logged = %d\n",
                                     devdata->queuefullmsg_logged);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_rcv = %lu\n",
                                     devdata->chstat.got_rcv);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_enbdisack = %lu\n",
                                     devdata->chstat.got_enbdisack);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.got_xmit_done = %lu\n",
                                     devdata->chstat.got_xmit_done);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.xmit_fail = %lu\n",
                                     devdata->chstat.xmit_fail);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_enbdis = %lu\n",
                                     devdata->chstat.sent_enbdis);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_promisc = %lu\n",
                                     devdata->chstat.sent_promisc);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_post = %lu\n",
                                     devdata->chstat.sent_post);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_post_failed = %lu\n",
                                     devdata->chstat.sent_post_failed);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.sent_xmit = %lu\n",
                                     devdata->chstat.sent_xmit);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.reject_count = %lu\n",
                                     devdata->chstat.reject_count);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " chstat.extra_rcvbufs_sent = %lu\n",
                                     devdata->chstat.extra_rcvbufs_sent);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv0 = %lu\n", devdata->n_rcv0);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv1 = %lu\n", devdata->n_rcv1);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv2 = %lu\n", devdata->n_rcv2);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcvx = %lu\n", devdata->n_rcvx);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " num_rcvbuf_in_iovm = %d\n",
                                     atomic_read(&devdata->num_rcvbuf_in_iovm));
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " alloc_failed_in_if_needed_cnt = %lu\n",
                                     devdata->alloc_failed_in_if_needed_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " alloc_failed_in_repost_rtn_cnt = %lu\n",
                                     devdata->alloc_failed_in_repost_rtn_cnt);
                /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                 *                   " inner_loop_limit_reached_cnt = %lu\n",
                 *                   devdata->inner_loop_limit_reached_cnt);
                 */
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " found_repost_rcvbuf_cnt = %lu\n",
                                     devdata->found_repost_rcvbuf_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " repost_found_skb_cnt = %lu\n",
                                     devdata->repost_found_skb_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_repost_deficit = %lu\n",
                                     devdata->n_repost_deficit);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " bad_rcv_buf = %lu\n",
                                     devdata->bad_rcv_buf);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " n_rcv_packets_not_accepted = %lu\n",
                                     devdata->n_rcv_packets_not_accepted);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_rcvd = %llu\n",
                                     devdata->interrupts_rcvd);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_notme = %llu\n",
                                     devdata->interrupts_notme);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " interrupts_disabled = %llu\n",
                                     devdata->interrupts_disabled);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " busy_cnt = %llu\n",
                                     devdata->busy_cnt);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " flow_control_upper_hits = %llu\n",
                                     devdata->flow_control_upper_hits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " flow_control_lower_hits = %llu\n",
                                     devdata->flow_control_lower_hits);
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " netif_queue = %s\n",
                                     netif_queue_stopped(devdata->netdev) ?
                                     "stopped" : "running");
                str_pos += scnprintf(vbuf + str_pos, len - str_pos,
                                     " xmits_outstanding = %lu\n",
                                     devdata_xmits_outstanding(devdata));
        }
        rcu_read_unlock();
        bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
        kfree(vbuf);
        return bytes_read;
}

static struct dentry *visornic_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
        .read = info_debugfs_read,
};

1ce0a9bc
DB
1587/* send_rcv_posts_if_needed - send receive buffers to the IO Partition.
1588 * @devdata: Visornic device.
68905a14 1589 */
ebef2610 1590static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
68905a14
DK
1591{
1592 int i;
1593 struct net_device *netdev;
1594 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1595 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
03156571 1596 int err;
68905a14
DK
1597
1598 /* don't do this until vnic is marked ready */
1599 if (!(devdata->enabled && devdata->enab_dis_acked))
ebef2610 1600 return;
68905a14
DK
1601
1602 netdev = devdata->netdev;
1603 rcv_bufs_allocated = 0;
1604 /* this code tries to prevent getting stuck here forever,
1605 * but still retries if we can't allocate them all this time.
1606 */
1607 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1608 while (cur_num_rcv_bufs_to_alloc > 0) {
1609 cur_num_rcv_bufs_to_alloc--;
1610 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1611 if (devdata->rcvbuf[i])
1612 continue;
1613 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1614 if (!devdata->rcvbuf[i]) {
1615 devdata->alloc_failed_in_if_needed_cnt++;
1616 break;
1617 }
1618 rcv_bufs_allocated++;
03156571
DK
1619 err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1620 if (err) {
1621 kfree_skb(devdata->rcvbuf[i]);
1622 devdata->rcvbuf[i] = NULL;
1623 break;
1624 }
68905a14
DK
1625 devdata->chstat.extra_rcvbufs_sent++;
1626 }
1627 }
1628 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
1629}
1630
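/* Example of the deficit bookkeeping above: if three buffers could not be
 * allocated on an earlier pass (num_rcv_bufs_could_not_alloc == 3), the
 * while loop makes at most three passes over the rcvbuf[] slots, filling
 * and posting every empty slot it can; whatever is still missing remains
 * counted in num_rcv_bufs_could_not_alloc and is retried on a later poll,
 * so a transient allocation failure can never stall this path forever.
 */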
1ce0a9bc
DB
1631/* drain_resp_queue - drains and ignores all messages from the resp queue
1632 * @cmdrsp: IO channel command response message.
1633 * @devdata: Visornic device to drain.
91678f37 1634 */
af9f5e7d
CD
1635static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
1636 struct visornic_devdata *devdata)
91678f37 1637{
f621a968
DB
1638 while (!visorchannel_signalremove(devdata->dev->visorchannel,
1639 IOCHAN_FROM_IOPART,
1640 cmdrsp))
91678f37
TS
1641 ;
1642}
1643
1ce0a9bc
DB
1644/* service_resp_queue - drain the response queue
1645 * @cmdrsp: IO channel command response message.
1646 * @devdata: Visornic device to drain.
1647 * @rx_work_done: Running count of receive packets processed, updated here.
1648 * @budget: Maximum number of receive packets to process before returning.
68905a14 1649 *
1ce0a9bc
DB
1650 * Drain the response queue of any responses from the IO Partition. Process the
1651 * responses as we get them.
68905a14 1652 */
af9f5e7d
CD
1653static void service_resp_queue(struct uiscmdrsp *cmdrsp,
1654 struct visornic_devdata *devdata,
1655 int *rx_work_done, int budget)
68905a14
DK
1656{
1657 unsigned long flags;
1658 struct net_device *netdev;
1659
61dd330a 1660 while (*rx_work_done < budget) {
921557cb
SW
1661 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1662 * moment
1663 */
1664 /* queue empty */
f621a968
DB
1665 if (visorchannel_signalremove(devdata->dev->visorchannel,
1666 IOCHAN_FROM_IOPART,
1667 cmdrsp))
921557cb 1668 break;
7c03621a
DK
1669
1670 switch (cmdrsp->net.type) {
1671 case NET_RCV:
1672 devdata->chstat.got_rcv++;
1673 /* process incoming packet */
946b2546 1674 *rx_work_done += visornic_rx(cmdrsp);
7c03621a
DK
1675 break;
1676 case NET_XMIT_DONE:
1677 spin_lock_irqsave(&devdata->priv_lock, flags);
1678 devdata->chstat.got_xmit_done++;
1679 if (cmdrsp->net.xmtdone.xmt_done_result)
1680 devdata->chstat.xmit_fail++;
1681 /* only call queue wake if we stopped it */
1682 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1683 /* ASSERT netdev == vnicinfo->netdev; */
1684 if ((netdev == devdata->netdev) &&
1685 netif_queue_stopped(netdev)) {
77c9a4ae
EA
1686 /* check if we have crossed the lower watermark
1687 * for netif_wake_queue()
68905a14 1688 */
dc38082f
TS
1689 if (vnic_hit_low_watermark
1690 (devdata,
1691 devdata->lower_threshold_net_xmits)) {
7c03621a
DK
1692 /* enough NET_XMITs completed,
1693 * so we can restart the netif queue
1694 */
1695 netif_wake_queue(netdev);
1696 devdata->flow_control_lower_hits++;
1697 }
68905a14 1698 }
7c03621a
DK
1699 skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
1700 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1701 kfree_skb(cmdrsp->net.buf);
68905a14 1702 break;
7c03621a
DK
1703 case NET_RCV_ENBDIS_ACK:
1704 devdata->chstat.got_enbdisack++;
1705 netdev = (struct net_device *)
1706 cmdrsp->net.enbdis.context;
87a9404e 1707 spin_lock_irqsave(&devdata->priv_lock, flags);
7c03621a
DK
1708 devdata->enab_dis_acked = 1;
1709 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1710
7c03621a
DK
1711 if (devdata->server_down &&
1712 devdata->server_change_state) {
1713 /* Inform Linux that the link is up */
1714 devdata->server_down = false;
1715 devdata->server_change_state = false;
1716 netif_wake_queue(netdev);
1717 netif_carrier_on(netdev);
1718 }
1719 break;
1720 case NET_CONNECT_STATUS:
1721 netdev = devdata->netdev;
1722 if (cmdrsp->net.enbdis.enable == 1) {
1723 spin_lock_irqsave(&devdata->priv_lock, flags);
1724 devdata->enabled = cmdrsp->net.enbdis.enable;
1725 spin_unlock_irqrestore(&devdata->priv_lock,
1726 flags);
1727 netif_wake_queue(netdev);
1728 netif_carrier_on(netdev);
1729 } else {
1730 netif_stop_queue(netdev);
1731 netif_carrier_off(netdev);
1732 spin_lock_irqsave(&devdata->priv_lock, flags);
1733 devdata->enabled = cmdrsp->net.enbdis.enable;
1734 spin_unlock_irqrestore(&devdata->priv_lock,
1735 flags);
1736 }
1737 break;
1738 default:
1739 break;
87a9404e 1740 }
7c03621a 1741 /* cmdrsp is now available for reuse */
68905a14
DK
1742 }
1743}
1744
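/* The watermark checks used above and in the transmit path are assumed to
 * be simple comparisons against the number of transmits still outstanding;
 * a minimal sketch of the low-side predicate (the real definition appears
 * earlier in this file):
 *
 *	#define vnic_hit_low_watermark(devdata, low_watermark) \
 *		(devdata_xmits_outstanding(devdata) <= low_watermark)
 */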
946b2546
NH
1745static int visornic_poll(struct napi_struct *napi, int budget)
1746{
1747 struct visornic_devdata *devdata = container_of(napi,
1748 struct visornic_devdata,
1749 napi);
1750 int rx_count = 0;
1751
ebef2610 1752 send_rcv_posts_if_needed(devdata);
61dd330a 1753 service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
946b2546 1754
77c9a4ae 1755 /* If there aren't any more packets to receive, stop the poll */
946b2546 1756 if (rx_count < budget)
6ad20165 1757 napi_complete_done(napi, rx_count);
946b2546
NH
1758
1759 return rx_count;
1760}
1761
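/* NAPI contract illustrated by visornic_poll() above: returning fewer
 * packets than the budget and calling napi_complete_done() takes this
 * instance off the poll list until napi_schedule() runs again (here, from
 * the timer in poll_for_irq() below); consuming the whole budget leaves it
 * scheduled so the remaining responses are drained on the next pass.
 */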
1ce0a9bc
DB
1762/* poll_for_irq - checks the status of the response queue
1763 * @v: The visornic devdata struct, passed as an unsigned long.
68905a14 1764 *
1ce0a9bc
DB
1765 * Main function of the vnic polling timer. Periodically check the response
1766 * queue and schedule the NAPI poll to drain it if needed.
68905a14 1767 */
af9f5e7d 1768static void poll_for_irq(unsigned long v)
68905a14 1769{
946b2546 1770 struct visornic_devdata *devdata = (struct visornic_devdata *)v;
68905a14 1771
946b2546
NH
1772 if (!visorchannel_signalempty(
1773 devdata->dev->visorchannel,
1774 IOCHAN_FROM_IOPART))
1775 napi_schedule(&devdata->napi);
68905a14 1776
946b2546 1777 atomic_set(&devdata->interrupt_rcvd, 0);
68905a14 1778
946b2546 1779 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
68905a14
DK
1780}
1781
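/* Poll flow as wired up in visornic_probe() below; since the channel
 * advertises VISOR_CHANNEL_IS_POLLING, presumably no real interrupt is
 * ever delivered and the 2 ms timer stands in for one:
 *
 *	irq_poll_timer -> poll_for_irq() -> napi_schedule()
 *		-> visornic_poll() -> service_resp_queue()
 */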
1ce0a9bc
DB
1782/* visornic_probe - probe function for visornic devices
1783 * @dev: The visor device discovered.
1784 *
1785 * Called when visorbus discovers a visornic device on its bus. It creates a new
1786 * visornic ethernet adapter.
68905a14 1787 *
1ce0a9bc 1788 * Return: 0 on success, or negative integer on error.
68905a14
DK
1789 */
1790static int visornic_probe(struct visor_device *dev)
1791{
1792 struct visornic_devdata *devdata = NULL;
1793 struct net_device *netdev = NULL;
1794 int err;
1795 int channel_offset = 0;
1796 u64 features;
1797
1798 netdev = alloc_etherdev(sizeof(struct visornic_devdata));
00748b0c
TS
1799 if (!netdev) {
1800 dev_err(&dev->device,
1801 "%s alloc_etherdev failed\n", __func__);
68905a14 1802 return -ENOMEM;
00748b0c 1803 }
68905a14
DK
1804
1805 netdev->netdev_ops = &visornic_dev_ops;
90cb147f 1806 netdev->watchdog_timeo = 5 * HZ;
051e9fbb 1807 SET_NETDEV_DEV(netdev, &dev->device);
68905a14 1808
50e66ccb 1809 /* Get MAC address from channel and read it into the device. */
68905a14 1810 netdev->addr_len = ETH_ALEN;
172f4c36 1811 channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
68905a14
DK
1812 err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
1813 ETH_ALEN);
00748b0c
TS
1814 if (err < 0) {
1815 dev_err(&dev->device,
1816 "%s failed to get mac addr from chan (%d)\n",
1817 __func__, err);
68905a14 1818 goto cleanup_netdev;
00748b0c 1819 }
68905a14
DK
1820
1821 devdata = devdata_initialize(netdev_priv(netdev), dev);
1822 if (!devdata) {
00748b0c
TS
1823 dev_err(&dev->device,
1824 "%s devdata_initialize failed\n", __func__);
68905a14
DK
1825 err = -ENOMEM;
1826 goto cleanup_netdev;
1827 }
91678f37
TS
1828 /* don't trust messages lying around in the channel */
1829 drain_resp_queue(devdata->cmdrsp, devdata);
68905a14
DK
1830
1831 devdata->netdev = netdev;
5deeea33 1832 dev_set_drvdata(&dev->device, devdata);
68905a14
DK
1833 init_waitqueue_head(&devdata->rsp_queue);
1834 spin_lock_init(&devdata->priv_lock);
921557cb
SW
1835 /* not yet */
1836 devdata->enabled = 0;
68905a14
DK
1837 atomic_set(&devdata->usage, 1);
1838
1839 /* Setup rcv bufs */
172f4c36 1840 channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
68905a14
DK
1841 err = visorbus_read_channel(dev, channel_offset,
1842 &devdata->num_rcv_bufs, 4);
00748b0c
TS
1843 if (err) {
1844 dev_err(&dev->device,
1845 "%s failed to get #rcv bufs from chan (%d)\n",
1846 __func__, err);
68905a14 1847 goto cleanup_netdev;
00748b0c 1848 }
68905a14 1849
5e757bc5
SB
1850 devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
1851 sizeof(struct sk_buff *), GFP_KERNEL);
68905a14
DK
1852 if (!devdata->rcvbuf) {
1853 err = -ENOMEM;
d12324e3 1854 goto cleanup_netdev;
68905a14
DK
1855 }
1856
496c8902
DB
1857 /* Set the net_xmit outstanding threshold.
1858 * Always leave two slots open, but never go below a minimum of 3;
1859 * note that max_outstanding_net_xmits must be > 0.
1860 */
68905a14 1861 devdata->max_outstanding_net_xmits =
36927c18 1862 max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
68905a14 1863 devdata->upper_threshold_net_xmits =
36927c18
TS
1864 max_t(unsigned long,
1865 2, (devdata->max_outstanding_net_xmits - 1));
68905a14 1866 devdata->lower_threshold_net_xmits =
36927c18
TS
1867 max_t(unsigned long,
1868 1, (devdata->max_outstanding_net_xmits / 2));
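/* Worked example of the thresholds above, assuming num_rcv_bufs = 64:
 *   max_outstanding_net_xmits = max(3, 64 / 3 - 2) = 19
 *   upper_threshold_net_xmits = max(2, 19 - 1) = 18
 *   lower_threshold_net_xmits = max(1, 19 / 2) = 9
 * so the transmit path can stop the queue around 18 in-flight sends, and
 * service_resp_queue() above wakes it once completions drain the count
 * back down to 9.
 */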
68905a14
DK
1869
1870 skb_queue_head_init(&devdata->xmitbufhead);
1871
1872 /* create a cmdrsp we can use to post and unpost rcv buffers */
1873 devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1874 if (!devdata->cmdrsp_rcv) {
1875 err = -ENOMEM;
d12324e3 1876 goto cleanup_rcvbuf;
68905a14
DK
1877 }
1878 devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1879 if (!devdata->xmit_cmdrsp) {
1880 err = -ENOMEM;
d12324e3 1881 goto cleanup_cmdrsp_rcv;
68905a14 1882 }
68905a14
DK
1883 INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
1884 devdata->server_down = false;
1885 devdata->server_change_state = false;
1886
1887 /* set the default mtu */
172f4c36 1888 channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
68905a14 1889 err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
00748b0c
TS
1890 if (err) {
1891 dev_err(&dev->device,
1892 "%s failed to get mtu from chan (%d)\n",
1893 __func__, err);
68905a14 1894 goto cleanup_xmit_cmdrsp;
00748b0c 1895 }
68905a14
DK
1896
1897 /* TODO: Setup Interrupt information */
1898 /* Let's start the polling timer and NAPI instance to get responses */
9c70ee32 1899 netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
946b2546
NH
1900
1901 setup_timer(&devdata->irq_poll_timer, poll_for_irq,
1902 (unsigned long)devdata);
77c9a4ae 1903 /* Note: This timer has to start running before the while
946b2546
NH
1904 * loop below because the napi routine is responsible for
1905 * setting enab_dis_acked
1906 */
1907 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
1908
172f4c36 1909 channel_offset = offsetof(struct visor_io_channel,
68905a14
DK
1910 channel_header.features);
1911 err = visorbus_read_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1912 if (err) {
1913 dev_err(&dev->device,
1914 "%s failed to get features from chan (%d)\n",
1915 __func__, err);
946b2546 1916 goto cleanup_napi_add;
00748b0c 1917 }
68905a14 1918
c75ebe5e
SW
1919 features |= VISOR_CHANNEL_IS_POLLING;
1920 features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
68905a14 1921 err = visorbus_write_channel(dev, channel_offset, &features, 8);
00748b0c
TS
1922 if (err) {
1923 dev_err(&dev->device,
1924 "%s failed to set features in chan (%d)\n",
1925 __func__, err);
946b2546 1926 goto cleanup_napi_add;
00748b0c 1927 }
68905a14 1928
50e66ccb 1929 /* Note: Interrupts have to be enabled before the while
61dd330a
DK
1930 * loop below because the napi routine is responsible for
1931 * setting enab_dis_acked
1932 */
1933 visorbus_enable_channel_interrupts(dev);
1934
68905a14 1935 err = register_netdev(netdev);
00748b0c
TS
1936 if (err) {
1937 dev_err(&dev->device,
1938 "%s register_netdev failed (%d)\n", __func__, err);
946b2546 1939 goto cleanup_napi_add;
00748b0c 1940 }
68905a14 1941
50e66ccb 1942 /* create debugfs directories */
68905a14
DK
1943 devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
1944 visornic_debugfs_dir);
1945 if (!devdata->eth_debugfs_dir) {
00748b0c
TS
1946 dev_err(&dev->device,
1947 "%s debugfs_create_dir %s failed\n",
1948 __func__, netdev->name);
68905a14 1949 err = -ENOMEM;
5b12100a 1950 goto cleanup_register_netdev;
68905a14
DK
1951 }
1952
00748b0c
TS
1953 dev_info(&dev->device, "%s success netdev=%s\n",
1954 __func__, netdev->name);
68905a14
DK
1955 return 0;
1956
5b12100a
DK
1957cleanup_register_netdev:
1958 unregister_netdev(netdev);
1959
946b2546
NH
1960cleanup_napi_add:
1961 del_timer_sync(&devdata->irq_poll_timer);
1962 netif_napi_del(&devdata->napi);
1963
68905a14
DK
1964cleanup_xmit_cmdrsp:
1965 kfree(devdata->xmit_cmdrsp);
1966
1967cleanup_cmdrsp_rcv:
1968 kfree(devdata->cmdrsp_rcv);
1969
1970cleanup_rcvbuf:
1971 kfree(devdata->rcvbuf);
1972
1973cleanup_netdev:
1974 free_netdev(netdev);
1975 return err;
1976}
1977
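/* The cleanup_* labels above form the usual kernel goto ladder: each label
 * falls through to the next, so a failure at any step in visornic_probe()
 * releases everything acquired before it, in reverse order of acquisition.
 */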
1ce0a9bc
DB
1978/* host_side_disappeared - IO Partition is gone
1979 * @devdata: Device object.
68905a14 1980 *
1ce0a9bc 1981 * IO partition servicing this device is gone; do cleanup.
68905a14
DK
1982 */
1983static void host_side_disappeared(struct visornic_devdata *devdata)
1984{
1985 unsigned long flags;
1986
1987 spin_lock_irqsave(&devdata->priv_lock, flags);
921557cb
SW
1988 /* indicate device destroyed */
1989 devdata->dev = NULL;
68905a14
DK
1990 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1991}
1992
1ce0a9bc
DB
1993/* visornic_remove - called when visornic dev goes away
1994 * @dev: Visornic device that is being removed.
68905a14 1995 *
1ce0a9bc 1996 * Called when DEVICE_DESTROY gets called to remove device.
68905a14
DK
1997 */
1998static void visornic_remove(struct visor_device *dev)
1999{
2000 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
46df8226
TS
2001 struct net_device *netdev;
2002 unsigned long flags;
68905a14 2003
00748b0c
TS
2004 if (!devdata) {
2005 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 2006 return;
00748b0c 2007 }
46df8226
TS
2008 spin_lock_irqsave(&devdata->priv_lock, flags);
2009 if (devdata->going_away) {
2010 spin_unlock_irqrestore(&devdata->priv_lock, flags);
2011 dev_err(&dev->device, "%s already being removed\n", __func__);
2012 return;
2013 }
2014 devdata->going_away = true;
2015 spin_unlock_irqrestore(&devdata->priv_lock, flags);
2016 netdev = devdata->netdev;
2017 if (!netdev) {
2018 dev_err(&dev->device, "%s not net device\n", __func__);
2019 return;
2020 }
2021
2022 /* going_away prevents new items being added to the workqueues */
ce388d7e 2023 cancel_work_sync(&devdata->timeout_reset);
46df8226
TS
2024
2025 debugfs_remove_recursive(devdata->eth_debugfs_dir);
921557cb
SW
2026 /* this will call visornic_close() */
2027 unregister_netdev(netdev);
46df8226 2028
946b2546
NH
2029 del_timer_sync(&devdata->irq_poll_timer);
2030 netif_napi_del(&devdata->napi);
46df8226 2031
68905a14
DK
2032 dev_set_drvdata(&dev->device, NULL);
2033 host_side_disappeared(devdata);
8d0119d8 2034 devdata_release(devdata);
46df8226 2035 free_netdev(netdev);
68905a14
DK
2036}
2037
1ce0a9bc
DB
2038/* visornic_pause - called when IO Part disappears
2039 * @dev: Visornic device that is being serviced.
2040 * @complete_func: Call when finished.
68905a14 2041 *
1ce0a9bc
DB
2042 * Called when the IO Partition has gone down. Need to free up resources and
2043 * wait for IO partition to come back. Mark link as down and don't attempt any
2044 * DMA. When we have freed memory, call the complete_func so that Command knows
2045 * we are done. If we don't call complete_func, the IO Partition will never
2046 * come back.
2047 *
2048 * Return: 0 on success.
68905a14
DK
2049 */
2050static int visornic_pause(struct visor_device *dev,
2051 visorbus_state_complete_func complete_func)
2052{
2053 struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
2054
d01da5ea 2055 visornic_serverdown(devdata, complete_func);
68905a14
DK
2056 return 0;
2057}
2058
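/* visornic_serverdown() (defined earlier in this file) is assumed to mark
 * the link down and arrange for complete_func to be invoked once in-flight
 * state is torn down; visornic_pause() only kicks that off, which is why
 * it can return 0 immediately.
 */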
1ce0a9bc
DB
2059/* visornic_resume - called when IO Partition has recovered
2060 * @dev: Visornic device that is being serviced.
2061 * @complete_func: Call when finished.
2062 *
2063 * Called when the IO partition has recovered. Re-establish connection to the IO
2064 * Partition and set the link up. Okay to do DMA again.
68905a14 2065 *
1ce0a9bc 2066 * Return: 0 on success, negative integer on error.
68905a14
DK
2067 */
2068static int visornic_resume(struct visor_device *dev,
2069 visorbus_state_complete_func complete_func)
2070{
2071 struct visornic_devdata *devdata;
2072 struct net_device *netdev;
2073 unsigned long flags;
2074
2075 devdata = dev_get_drvdata(&dev->device);
00748b0c
TS
2076 if (!devdata) {
2077 dev_err(&dev->device, "%s no devdata\n", __func__);
68905a14 2078 return -EINVAL;
00748b0c 2079 }
68905a14
DK
2080
2081 netdev = devdata->netdev;
2082
c847020e
TS
2083 spin_lock_irqsave(&devdata->priv_lock, flags);
2084 if (devdata->server_change_state) {
68905a14 2085 spin_unlock_irqrestore(&devdata->priv_lock, flags);
c847020e 2086 dev_err(&dev->device, "%s server already changing state\n",
00748b0c 2087 __func__);
c847020e 2088 return -EINVAL;
68905a14 2089 }
c847020e
TS
2090 if (!devdata->server_down) {
2091 spin_unlock_irqrestore(&devdata->priv_lock, flags);
2092 dev_err(&dev->device, "%s server not down\n", __func__);
2093 complete_func(dev, 0);
2094 return 0;
2095 }
2096 devdata->server_change_state = true;
2097 spin_unlock_irqrestore(&devdata->priv_lock, flags);
946b2546 2098
c847020e
TS
2099 /* Must transition channel to ATTACHED state BEFORE
2100 * we can start using the device again.
2101 * TODO: State transitions
2102 */
946b2546
NH
2103 mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
2104
c847020e
TS
2105 rtnl_lock();
2106 dev_open(netdev);
2107 rtnl_unlock();
68905a14
DK
2108
2109 complete_func(dev, 0);
2110 return 0;
2111}
2112
172f4c36 2113/* This is used to tell the visorbus driver which types of visor devices
6083c710
DK
2114 * we support, and what functions to call when a visor device that we support
2115 * is attached or removed.
2116 */
2117static struct visor_driver visornic_driver = {
2118 .name = "visornic",
6083c710
DK
2119 .owner = THIS_MODULE,
2120 .channel_types = visornic_channel_types,
2121 .probe = visornic_probe,
2122 .remove = visornic_remove,
2123 .pause = visornic_pause,
2124 .resume = visornic_resume,
2125 .channel_interrupt = NULL,
2126};
2127
1ce0a9bc
DB
2128/* visornic_init - init function
2129 *
2130 * Init function for the visornic driver. Do initial driver setup and wait
2131 * for devices.
68905a14 2132 *
1ce0a9bc 2133 * Return: 0 on success, negative integer on error.
68905a14
DK
2134 */
2135static int visornic_init(void)
2136{
2137 struct dentry *ret;
2138 int err = -ENOMEM;
2139
68905a14
DK
2140 visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
2141 if (!visornic_debugfs_dir)
2142 return err;
2143
bca74ee5 2144 ret = debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
68905a14
DK
2145 &debugfs_info_fops);
2146 if (!ret)
2147 goto cleanup_debugfs;
bca74ee5 2148 ret = debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir,
68905a14
DK
2149 NULL, &debugfs_enable_ints_fops);
2150 if (!ret)
2151 goto cleanup_debugfs;
2152
8b5081c8 2153 err = visorbus_register_visor_driver(&visornic_driver);
186896fd
DB
2154 if (err)
2155 goto cleanup_debugfs;
2156
2157 return 0;
68905a14 2158
68905a14
DK
2159cleanup_debugfs:
2160 debugfs_remove_recursive(visornic_debugfs_dir);
68905a14
DK
2161 return err;
2162}
2163
1ce0a9bc 2164/* visornic_cleanup - driver exit routine
68905a14 2165 *
1ce0a9bc 2166 * Unregister driver from the bus and free up memory.
68905a14
DK
2167 */
2168static void visornic_cleanup(void)
2169{
3798ff31 2170 visorbus_unregister_visor_driver(&visornic_driver);
68905a14 2171 debugfs_remove_recursive(visornic_debugfs_dir);
68905a14
DK
2172}
2173
2174module_init(visornic_init);
2175module_exit(visornic_cleanup);
2176
2177MODULE_AUTHOR("Unisys");
2178MODULE_LICENSE("GPL");
bff8c1a1 2179MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");