1/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2 * All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
13 * details.
14 */
15
16/* This driver lives in a spar partition, and registers to ethernet io
17 * channels from the visorbus driver. It creates netdev devices and
18 * forwards transmit to the IO channel and accepts rcvs from the IO
19 * Partition via the IO channel.
20 */
21
22#include <linux/debugfs.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/skbuff.h>
26#include <linux/kthread.h>
27
28#include "visorbus.h"
29#include "iochannel.h"
30
31#define VISORNIC_INFINITE_RESPONSE_WAIT 0
32#define VISORNICSOPENMAX 32
33#define MAXDEVICES 16384
34
35/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
36 * = 163840 bytes
37 */
38#define MAX_BUF 163840
39
40static spinlock_t dev_num_pool_lock;
41static void *dev_num_pool; /**< pool to grab device numbers from */
42
43static int visornic_probe(struct visor_device *dev);
44static void visornic_remove(struct visor_device *dev);
45static int visornic_pause(struct visor_device *dev,
46 visorbus_state_complete_func complete_func);
47static int visornic_resume(struct visor_device *dev,
48 visorbus_state_complete_func complete_func);
49
50/* DEBUGFS declarations */
51static ssize_t info_debugfs_read(struct file *file, char __user *buf,
52 size_t len, loff_t *offset);
53static ssize_t enable_ints_write(struct file *file, const char __user *buf,
54 size_t len, loff_t *ppos);
55static struct dentry *visornic_debugfs_dir;
56static const struct file_operations debugfs_info_fops = {
57 .read = info_debugfs_read,
58};
59
60static const struct file_operations debugfs_enable_ints_fops = {
61 .write = enable_ints_write,
62};
63
64static struct workqueue_struct *visornic_serverdown_workqueue;
65static struct workqueue_struct *visornic_timeout_reset_workqueue;
66
 67/* GUIDs for the channel type supported by this driver. */
68static struct visor_channeltype_descriptor visornic_channel_types[] = {
69 /* Note that the only channel type we expect to be reported by the
70 * bus driver is the SPAR_VNIC channel.
71 */
72 { SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
73 { NULL_UUID_LE, NULL }
74};
75
76/* This is used to tell the visor bus driver which types of visor devices
77 * we support, and what functions to call when a visor device that we support
78 * is attached or removed.
79 */
80static struct visor_driver visornic_driver = {
81 .name = "visornic",
82 .version = "1.0.0.0",
83 .vertag = NULL,
84 .owner = THIS_MODULE,
85 .channel_types = visornic_channel_types,
86 .probe = visornic_probe,
87 .remove = visornic_remove,
88 .pause = visornic_pause,
89 .resume = visornic_resume,
90 .channel_interrupt = NULL,
91};
92
93struct visor_thread_info {
94 struct task_struct *task;
95 struct completion has_stopped;
96 int id;
97};
98
99struct chanstat {
100 unsigned long got_rcv;
101 unsigned long got_enbdisack;
102 unsigned long got_xmit_done;
103 unsigned long xmit_fail;
104 unsigned long sent_enbdis;
105 unsigned long sent_promisc;
106 unsigned long sent_post;
107 unsigned long sent_xmit;
108 unsigned long reject_count;
109 unsigned long extra_rcvbufs_sent;
110};
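/* Illustrative note: several of these counters are request/response pairs,
 * e.g. sent_enbdis vs. got_enbdisack and sent_xmit vs. got_xmit_done; the
 * difference sent_xmit - got_xmit_done is what visornic_xmit() and
 * drain_queue() below compare against the flow-control watermarks.
 */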
111
112struct visornic_devdata {
113 int devnum;
114 int thread_wait_ms;
115 unsigned short enabled; /* 0 disabled 1 enabled to receive */
116 unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
117 * IOPART
118 */
119 struct visor_device *dev;
120 char name[99];
121 struct list_head list_all; /* < link within list_all_devices list */
122 struct net_device *netdev;
123 struct net_device_stats net_stats;
124 atomic_t interrupt_rcvd;
125 wait_queue_head_t rsp_queue;
126 struct sk_buff **rcvbuf;
127 u64 uniquenum; /* TODO figure out why not used */
128 unsigned short old_flags; /* flags as they were prior to
129 * set_multicast_list
130 */
131 atomic_t usage; /* count of users */
132 int num_rcv_bufs; /* indicates how many rcv buffers
133 * the vnic will post
134 */
135 int num_rcv_bufs_could_not_alloc;
136 atomic_t num_rcvbuf_in_iovm;
137 unsigned long alloc_failed_in_if_needed_cnt;
138 unsigned long alloc_failed_in_repost_rtn_cnt;
139 int max_outstanding_net_xmits; /* absolute max number of outstanding
140 * xmits - should never hit this
141 */
142 int upper_threshold_net_xmits; /* high water mark for calling
143 * netif_stop_queue()
144 */
145 int lower_threshold_net_xmits; /* high water mark for calling
146 * netif_wake_queue()
147 */
148 struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
149 * xmit buffer list that have been
150 * sent to the IOPART end
151 */
152 struct work_struct serverdown_completion;
 153	visorbus_state_complete_func server_down_complete_func;
154 struct work_struct timeout_reset;
155 struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
156 * posting/unposting rcv buffers
157 */
158 struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is
159 * never more that one xmit in
160 * progress at a time
161 */
162 bool server_down; /* IOPART is down */
163 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
 164	bool going_away; /* device is being torn down */
165 struct dentry *eth_debugfs_dir;
166 struct visor_thread_info threadinfo;
167 u64 interrupts_rcvd;
168 u64 interrupts_notme;
169 u64 interrupts_disabled;
170 u64 busy_cnt;
171 spinlock_t priv_lock; /* spinlock to access devdata structures */
172
173 /* flow control counter */
174 u64 flow_control_upper_hits;
175 u64 flow_control_lower_hits;
176
177 /* debug counters */
178 unsigned long n_rcv0; /* # rcvs of 0 buffers */
179 unsigned long n_rcv1; /* # rcvs of 1 buffers */
180 unsigned long n_rcv2; /* # rcvs of 2 buffers */
181 unsigned long n_rcvx; /* # rcvs of >2 buffers */
182 unsigned long found_repost_rcvbuf_cnt; /* # times we called
183 * repost_rcvbuf_cnt
184 */
185 unsigned long repost_found_skb_cnt; /* # times found the skb */
186 unsigned long n_repost_deficit; /* # times we couldn't find
187 * all of the rcv buffers
188 */
 189	unsigned long bad_rcv_buf; /* # times we neglected to
190 * free the rcv skb because
191 * we didn't know where it
192 * came from
193 */
 194	unsigned long n_rcv_packets_not_accepted;/* # bogus rcv packets */
195
196 int queuefullmsg_logged;
197 struct chanstat chstat;
198};
199
200/* array of open devices maintained by open() and close() */
201static struct net_device *num_visornic_open[VISORNICSOPENMAX];
202
203/* List of all visornic_devdata structs,
204 * linked via the list_all member
205 */
206static LIST_HEAD(list_all_devices);
207static DEFINE_SPINLOCK(lock_all_devices);
208
209/**
210 * visor_copy_fragsinfo_from_skb(
211 * @skb_in: skbuff that we are pulling the frags from
212 * @firstfraglen: length of first fragment in skb
213 * @frags_max: max len of frags array
214 * @frags: frags array filled in on output
215 *
216 * Copy the fragment list in the SKB to a phys_info
217 * array that the IOPART understands.
218 * Return value indicates number of entries filled in frags
219 * Negative values indicate an error.
220 */
 221static int
222visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
223 unsigned int frags_max,
224 struct phys_info frags[])
225{
226 unsigned int count = 0, ii, size, offset = 0, numfrags;
227
228 numfrags = skb_shinfo(skb)->nr_frags;
229
230 while (firstfraglen) {
231 if (count == frags_max)
232 return -EINVAL;
233
234 frags[count].pi_pfn =
235 page_to_pfn(virt_to_page(skb->data + offset));
236 frags[count].pi_off =
237 (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
238 size = min_t(unsigned int, firstfraglen,
239 PI_PAGE_SIZE - frags[count].pi_off);
240
241 /* can take smallest of firstfraglen (what's left) OR
242 * bytes left in the page
243 */
244 frags[count].pi_len = size;
245 firstfraglen -= size;
246 offset += size;
247 count++;
248 }
249 if (numfrags) {
250 if ((count + numfrags) > frags_max)
251 return -EINVAL;
252
253 for (ii = 0; ii < numfrags; ii++) {
254 count = add_physinfo_entries(page_to_pfn(
255 skb_frag_page(&skb_shinfo(skb)->frags[ii])),
256 skb_shinfo(skb)->frags[ii].
257 page_offset,
258 skb_shinfo(skb)->frags[ii].
259 size, count, frags_max, frags);
260 if (!count)
261 return -EIO;
262 }
263 }
264 if (skb_shinfo(skb)->frag_list) {
265 struct sk_buff *skbinlist;
266 int c;
267
268 for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
269 skbinlist = skbinlist->next) {
270 c = visor_copy_fragsinfo_from_skb(skbinlist,
271 skbinlist->len -
272 skbinlist->data_len,
273 frags_max - count,
274 &frags[count]);
275 if (c < 0)
276 return c;
277 count += c;
278 }
279 }
280 return count;
281}
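/* Illustrative sketch (not part of the driver): this is how the transmit
 * path below feeds visor_copy_fragsinfo_from_skb(). Only the length of the
 * linear area (skb->len - skb->data_len) is passed as firstfraglen; the
 * paged frags and any frag_list are walked by the helper itself.
 *
 *	int nfrags;
 *	unsigned int firstfraglen = skb->len - skb->data_len;
 *
 *	nfrags = visor_copy_fragsinfo_from_skb(skb, firstfraglen,
 *						MAX_PHYS_INFO,
 *						cmdrsp->net.xmt.frags);
 *	if (nfrags < 0)
 *		return NETDEV_TX_BUSY;	// could not describe the skb
 */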
282
283/**
 284 * visor_thread_start - starts thread for the device
285 * @thrinfo: The thread to start
286 * @threadfn: Function the thread starts
287 * @thrcontext: Context to pass to the thread, i.e. devdata
288 * @name: string describing name of thread
289 *
290 * Starts a thread for the device, currently only thread is
291 * process_incoming_rsps
292 * Returns 0 on success;
293 */
294static int visor_thread_start(struct visor_thread_info *thrinfo,
295 int (*threadfn)(void *),
296 void *thrcontext, char *name)
297{
298 /* used to stop the thread */
299 init_completion(&thrinfo->has_stopped);
300 thrinfo->task = kthread_run(threadfn, thrcontext, name);
301 if (IS_ERR(thrinfo->task)) {
302 pr_debug("%s failed (%ld)\n",
303 __func__, PTR_ERR(thrinfo->task));
304 thrinfo->id = 0;
305 return -EINVAL;
306 }
307 thrinfo->id = thrinfo->task->pid;
308 return 0;
309}
310
311/**
312 * visor_thread_stop - stop a thread for the device
313 * @thrinfo: The thread to stop
314 *
315 * Stop the thread and wait for completion for a minute
316 * Returns void.
317 */
318static void visor_thread_stop(struct visor_thread_info *thrinfo)
319{
320 if (!thrinfo->id)
321 return; /* thread not running */
322
323 kthread_stop(thrinfo->task);
324 /* give up if the thread has NOT died in 1 minute */
325 if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
326 thrinfo->id = 0;
327}
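/* Illustrative note: the thread function handed to visor_thread_start() is
 * expected to signal thrinfo->has_stopped when it exits, as
 * process_incoming_rsps() below does with
 * complete_and_exit(&devdata->threadinfo.has_stopped, 0); otherwise the
 * one-minute wait above would always time out.
 */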
328
329/* DebugFS code */
330static ssize_t info_debugfs_read(struct file *file, char __user *buf,
331 size_t len, loff_t *offset)
332{
333 int i;
334 ssize_t bytes_read = 0;
335 int str_pos = 0;
336 struct visornic_devdata *devdata;
337 char *vbuf;
338
339 if (len > MAX_BUF)
340 len = MAX_BUF;
341 vbuf = kzalloc(len, GFP_KERNEL);
342 if (!vbuf)
343 return -ENOMEM;
344
345 /* for each vnic channel
346 * dump out channel specific data
347 */
348 for (i = 0; i < VISORNICSOPENMAX; i++) {
349 if (!num_visornic_open[i])
350 continue;
351
352 devdata = netdev_priv(num_visornic_open[i]);
353 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
354 "Vnic i = %d\n", i);
355 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
356 "netdev = %s (0x%p), MAC Addr %pM\n",
357 num_visornic_open[i]->name,
358 num_visornic_open[i],
359 num_visornic_open[i]->dev_addr);
360 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
361 "VisorNic Dev Info = 0x%p\n", devdata);
362 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
363 " num_rcv_bufs = %d\n",
364 devdata->num_rcv_bufs);
365 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
 366				     " max_outstanding_net_xmits = %d\n",
367 devdata->max_outstanding_net_xmits);
368 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
369 " upper_threshold_net_xmits = %d\n",
370 devdata->upper_threshold_net_xmits);
371 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
372 " lower_threshold_net_xmits = %d\n",
373 devdata->lower_threshold_net_xmits);
374 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
375 " queuefullmsg_logged = %d\n",
376 devdata->queuefullmsg_logged);
377 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
378 " chstat.got_rcv = %lu\n",
379 devdata->chstat.got_rcv);
380 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
381 " chstat.got_enbdisack = %lu\n",
382 devdata->chstat.got_enbdisack);
383 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
384 " chstat.got_xmit_done = %lu\n",
385 devdata->chstat.got_xmit_done);
386 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
387 " chstat.xmit_fail = %lu\n",
388 devdata->chstat.xmit_fail);
389 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
390 " chstat.sent_enbdis = %lu\n",
391 devdata->chstat.sent_enbdis);
392 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
393 " chstat.sent_promisc = %lu\n",
394 devdata->chstat.sent_promisc);
395 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
396 " chstat.sent_post = %lu\n",
397 devdata->chstat.sent_post);
398 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
399 " chstat.sent_xmit = %lu\n",
400 devdata->chstat.sent_xmit);
401 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
402 " chstat.reject_count = %lu\n",
403 devdata->chstat.reject_count);
404 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
405 " chstat.extra_rcvbufs_sent = %lu\n",
406 devdata->chstat.extra_rcvbufs_sent);
407 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
408 " n_rcv0 = %lu\n", devdata->n_rcv0);
409 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
410 " n_rcv1 = %lu\n", devdata->n_rcv1);
411 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
412 " n_rcv2 = %lu\n", devdata->n_rcv2);
413 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
414 " n_rcvx = %lu\n", devdata->n_rcvx);
415 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
416 " num_rcvbuf_in_iovm = %d\n",
417 atomic_read(&devdata->num_rcvbuf_in_iovm));
418 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
419 " alloc_failed_in_if_needed_cnt = %lu\n",
420 devdata->alloc_failed_in_if_needed_cnt);
421 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
422 " alloc_failed_in_repost_rtn_cnt = %lu\n",
423 devdata->alloc_failed_in_repost_rtn_cnt);
424 /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
425 * " inner_loop_limit_reached_cnt = %lu\n",
426 * devdata->inner_loop_limit_reached_cnt);
427 */
428 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
429 " found_repost_rcvbuf_cnt = %lu\n",
430 devdata->found_repost_rcvbuf_cnt);
431 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
432 " repost_found_skb_cnt = %lu\n",
433 devdata->repost_found_skb_cnt);
434 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
435 " n_repost_deficit = %lu\n",
436 devdata->n_repost_deficit);
437 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
438 " bad_rcv_buf = %lu\n",
439 devdata->bad_rcv_buf);
440 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
441 " n_rcv_packets_not_accepted = %lu\n",
442 devdata->n_rcv_packets_not_accepted);
443 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
444 " interrupts_rcvd = %llu\n",
445 devdata->interrupts_rcvd);
446 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
447 " interrupts_notme = %llu\n",
448 devdata->interrupts_notme);
449 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
450 " interrupts_disabled = %llu\n",
451 devdata->interrupts_disabled);
452 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
453 " busy_cnt = %llu\n",
454 devdata->busy_cnt);
455 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
456 " flow_control_upper_hits = %llu\n",
457 devdata->flow_control_upper_hits);
458 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
459 " flow_control_lower_hits = %llu\n",
460 devdata->flow_control_lower_hits);
461 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
462 " thread_wait_ms = %d\n",
463 devdata->thread_wait_ms);
464 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
465 " netif_queue = %s\n",
466 netif_queue_stopped(devdata->netdev) ?
467 "stopped" : "running");
468 }
469 bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
470 kfree(vbuf);
471 return bytes_read;
472}
473
474static ssize_t enable_ints_write(struct file *file,
475 const char __user *buffer,
476 size_t count, loff_t *ppos)
477{
478 char buf[4];
479 int i, new_value;
480 struct visornic_devdata *devdata;
481
482 if (count >= ARRAY_SIZE(buf))
483 return -EINVAL;
484
485 buf[count] = '\0';
486 if (copy_from_user(buf, buffer, count))
487 return -EFAULT;
488
489 i = kstrtoint(buf, 10, &new_value);
490 if (i != 0)
491 return -EFAULT;
492
493 /* set all counts to new_value usually 0 */
494 for (i = 0; i < VISORNICSOPENMAX; i++) {
495 if (num_visornic_open[i]) {
496 devdata = netdev_priv(num_visornic_open[i]);
497 /* TODO update features bit in channel */
498 }
499 }
500
501 return count;
502}
503
504/**
505 * visornic_serverdown_complete - IOPART went down, need to pause
506 * device
507 * @work: Work queue it was scheduled on
508 *
509 * The IO partition has gone down and we need to do some cleanup
510 * for when it comes back. Treat the IO partition as the link
511 * being down.
512 * Returns void.
513 */
514static void
515visornic_serverdown_complete(struct work_struct *work)
516{
517 struct visornic_devdata *devdata;
518 struct net_device *netdev;
519 unsigned long flags;
520 int i = 0, count = 0;
521
522 devdata = container_of(work, struct visornic_devdata,
523 serverdown_completion);
524 netdev = devdata->netdev;
525
526 /* Stop using datachan */
527 visor_thread_stop(&devdata->threadinfo);
528
529 /* Inform Linux that the link is down */
530 netif_carrier_off(netdev);
531 netif_stop_queue(netdev);
532
533 /* Free the skb for XMITs that haven't been serviced by the server
534 * We shouldn't have to inform Linux about these IOs because they
535 * are "lost in the ethernet"
536 */
537 skb_queue_purge(&devdata->xmitbufhead);
538
539 spin_lock_irqsave(&devdata->priv_lock, flags);
540 /* free rcv buffers */
541 for (i = 0; i < devdata->num_rcv_bufs; i++) {
542 if (devdata->rcvbuf[i]) {
543 kfree_skb(devdata->rcvbuf[i]);
544 devdata->rcvbuf[i] = NULL;
545 count++;
546 }
547 }
548 atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
549 spin_unlock_irqrestore(&devdata->priv_lock, flags);
550
551 if (devdata->server_down_complete_func)
552 (*devdata->server_down_complete_func)(devdata->dev, 0);
553
554 devdata->server_down = true;
555 devdata->server_change_state = false;
 556	devdata->server_down_complete_func = NULL;
557}
558
559/**
 560 * visornic_serverdown - Command has notified us that IOPART is down
561 * @devdata: device that is being managed by IOPART
562 *
563 * Schedule the work needed to handle the server down request. Make
564 * sure we haven't already handled the server change state event.
 565 * Returns 0 if we scheduled the work, negative on error.
566 */
567static int
568visornic_serverdown(struct visornic_devdata *devdata,
569 visorbus_state_complete_func complete_func)
 570{
571 unsigned long flags;
572
573 spin_lock_irqsave(&devdata->priv_lock, flags);
 574	if (!devdata->server_down && !devdata->server_change_state) {
575 if (devdata->going_away) {
576 spin_unlock_irqrestore(&devdata->priv_lock, flags);
577 dev_dbg(&devdata->dev->device,
578 "%s aborting because device removal pending\n",
579 __func__);
580 return -ENODEV;
581 }
 582		devdata->server_change_state = true;
 583		devdata->server_down_complete_func = complete_func;
584 queue_work(visornic_serverdown_workqueue,
585 &devdata->serverdown_completion);
586 } else if (devdata->server_change_state) {
587 dev_dbg(&devdata->dev->device, "%s changing state\n",
588 __func__);
 589		spin_unlock_irqrestore(&devdata->priv_lock, flags);
590 return -EINVAL;
591 }
 592	spin_unlock_irqrestore(&devdata->priv_lock, flags);
593 return 0;
594}
595
596/**
597 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
 598 * @netdev: network adapter the rcv bufs are attached to.
599 *
600 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
601 * so that it can write rcv data into our memory space.
602 * Return pointer to sk_buff
603 */
604static struct sk_buff *
605alloc_rcv_buf(struct net_device *netdev)
606{
607 struct sk_buff *skb;
608
609 /* NOTE: the first fragment in each rcv buffer is pointed to by
610 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
611 * in length, so the firstfrag is large enough to hold 1514.
612 */
613 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
614 if (!skb)
615 return NULL;
616 skb->dev = netdev;
617 skb->len = RCVPOST_BUF_SIZE;
618 /* current value of mtu doesn't come into play here; large
619 * packets will just end up using multiple rcv buffers all of
620 * same size
621 */
622 skb->data_len = 0; /* dev_alloc_skb already zeroes it out
623 * for clarification.
624 */
625 return skb;
626}
627
628/**
629 * post_skb - post a skb to the IO Partition.
630 * @cmdrsp: cmdrsp packet to be send to the IO Partition
 631 * @devdata: visornic_devdata to post the skb to
632 * @skb: skb to give to the IO partition
633 *
634 * Send the skb to the IO Partition.
635 * Returns void
636 */
637static inline void
638post_skb(struct uiscmdrsp *cmdrsp,
639 struct visornic_devdata *devdata, struct sk_buff *skb)
640{
641 cmdrsp->net.buf = skb;
642 cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
643 cmdrsp->net.rcvpost.frag.pi_off =
644 (unsigned long)skb->data & PI_PAGE_MASK;
645 cmdrsp->net.rcvpost.frag.pi_len = skb->len;
646 cmdrsp->net.rcvpost.unique_num = devdata->uniquenum;
647
648 if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
649 cmdrsp->net.type = NET_RCV_POST;
650 cmdrsp->cmdtype = CMD_NET_TYPE;
651 visorchannel_signalinsert(devdata->dev->visorchannel,
652 IOCHAN_TO_IOPART,
653 cmdrsp);
654 atomic_inc(&devdata->num_rcvbuf_in_iovm);
655 devdata->chstat.sent_post++;
656 }
657}
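/* Illustrative arithmetic (PI_PAGE_SIZE assumed to be 4096 here, and
 * PI_PAGE_MASK assumed to mask the within-page offset): if skb->data
 * happened to start at offset 0xf00 into its page and skb->len were 0x200
 * bytes, then pi_off + len = 0x1100 > PI_PAGE_SIZE and post_skb() would
 * silently skip the post, so receive buffers must never straddle a page
 * boundary.
 */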
658
659/**
660 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
 661 * @netdev: netdevice we are enabling/disabling, used as context
662 * return value
663 * @state: enable = 1/disable = 0
664 * @devdata: visornic device we are enabling/disabling
665 *
666 * Send the enable/disable message to the IO Partition.
667 * Returns void
668 */
669static void
670send_enbdis(struct net_device *netdev, int state,
671 struct visornic_devdata *devdata)
672{
673 devdata->cmdrsp_rcv->net.enbdis.enable = state;
674 devdata->cmdrsp_rcv->net.enbdis.context = netdev;
675 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
676 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
677 visorchannel_signalinsert(devdata->dev->visorchannel,
678 IOCHAN_TO_IOPART,
679 devdata->cmdrsp_rcv);
680 devdata->chstat.sent_enbdis++;
681}
682
683/**
684 * visornic_disable_with_timeout - Disable network adapter
 685 * @netdev: netdevice to disable
686 * @timeout: timeout to wait for disable
687 *
688 * Disable the network adapter and inform the IO Partition that we
689 * are disabled, reclaim memory from rcv bufs.
690 * Returns 0 on success, negative for failure of IO Partition
691 * responding.
692 *
693 */
694static int
695visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
696{
697 struct visornic_devdata *devdata = netdev_priv(netdev);
698 int i;
699 unsigned long flags;
700 int wait = 0;
701
702 /* stop the transmit queue so nothing more can be transmitted */
703 netif_stop_queue(netdev);
704
705 /* send a msg telling the other end we are stopping incoming pkts */
706 spin_lock_irqsave(&devdata->priv_lock, flags);
707 devdata->enabled = 0;
708 devdata->enab_dis_acked = 0; /* must wait for ack */
709 spin_unlock_irqrestore(&devdata->priv_lock, flags);
710
711 /* send disable and wait for ack -- don't hold lock when sending
712 * disable because if the queue is full, insert might sleep.
713 */
714 send_enbdis(netdev, 0, devdata);
715
716 /* wait for ack to arrive before we try to free rcv buffers
717 * NOTE: the other end automatically unposts the rcv buffers when
 718	 * it gets a disable.
719 */
720 spin_lock_irqsave(&devdata->priv_lock, flags);
721 while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
722 (wait < timeout)) {
723 if (devdata->enab_dis_acked)
724 break;
725 if (devdata->server_down || devdata->server_change_state) {
726 spin_unlock_irqrestore(&devdata->priv_lock, flags);
727 dev_dbg(&netdev->dev, "%s server went away\n",
728 __func__);
729 return -EIO;
730 }
731 set_current_state(TASK_INTERRUPTIBLE);
732 spin_unlock_irqrestore(&devdata->priv_lock, flags);
733 wait += schedule_timeout(msecs_to_jiffies(10));
734 spin_lock_irqsave(&devdata->priv_lock, flags);
735 }
736
737 /* Wait for usage to go to 1 (no other users) before freeing
738 * rcv buffers
739 */
740 if (atomic_read(&devdata->usage) > 1) {
741 while (1) {
742 set_current_state(TASK_INTERRUPTIBLE);
743 spin_unlock_irqrestore(&devdata->priv_lock, flags);
744 schedule_timeout(msecs_to_jiffies(10));
745 spin_lock_irqsave(&devdata->priv_lock, flags);
 746			if (atomic_read(&devdata->usage) == 1)
747 break;
748 }
749 }
750
751 /* we've set enabled to 0, so we can give up the lock. */
752 spin_unlock_irqrestore(&devdata->priv_lock, flags);
753
 754	/* Free rcv buffers - other end has automatically unposted them on
755 * disable
756 */
757 for (i = 0; i < devdata->num_rcv_bufs; i++) {
758 if (devdata->rcvbuf[i]) {
759 kfree_skb(devdata->rcvbuf[i]);
760 devdata->rcvbuf[i] = NULL;
761 }
762 }
763
764 /* remove references from array */
765 for (i = 0; i < VISORNICSOPENMAX; i++)
766 if (num_visornic_open[i] == netdev) {
767 num_visornic_open[i] = NULL;
768 break;
769 }
770
771 return 0;
772}
773
774/**
775 * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
776 * @netdev: struct netdevice
777 * @devdata: visornic_devdata
778 *
779 * Allocate rcv buffers and post them to the IO Partition.
780 * Return 0 for success, and negative for failure.
781 */
782static int
783init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
784{
785 int i, count;
786
787 /* allocate fixed number of receive buffers to post to uisnic
788 * post receive buffers after we've allocated a required amount
789 */
790 for (i = 0; i < devdata->num_rcv_bufs; i++) {
791 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
792 if (!devdata->rcvbuf[i])
793 break; /* if we failed to allocate one let us stop */
794 }
795 if (i == 0) /* couldn't even allocate one -- bail out */
796 return -ENOMEM;
797 count = i;
798
 799	/* Ensure we can alloc 2/3rd of the requested number of buffers.
800 * 2/3 is an arbitrary choice; used also in ndis init.c
801 */
802 if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
803 /* free receive buffers we did alloc and then bail out */
804 for (i = 0; i < count; i++) {
805 kfree_skb(devdata->rcvbuf[i]);
806 devdata->rcvbuf[i] = NULL;
807 }
808 return -ENOMEM;
809 }
810
811 /* post receive buffers to receive incoming input - without holding
812 * lock - we've not enabled nor started the queue so there shouldn't
813 * be any rcv or xmit activity
814 */
815 for (i = 0; i < count; i++)
816 post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
817
818 return 0;
819}
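/* Worked example of the 2/3 test above (numbers are illustrative only): with
 * devdata->num_rcv_bufs = 64 the threshold is (2 * 64) / 3 = 42, so
 * init_rcv_bufs() succeeds only if at least 42 of the 64 sk_buffs could be
 * allocated; otherwise everything allocated so far is freed and -ENOMEM is
 * returned.
 */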
820
821/**
822 * visornic_enable_with_timeout - send enable to IO Part
823 * @netdev: struct net_device
824 * @timeout: Time to wait for the ACK from the enable
825 *
826 * Sends enable to IOVM, inits, and posts receive buffers to IOVM
827 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
 828 * Return 0 for success, negative for failure.
829 */
830static int
831visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
832{
833 int i;
834 struct visornic_devdata *devdata = netdev_priv(netdev);
835 unsigned long flags;
836 int wait = 0;
837
838 /* NOTE: the other end automatically unposts the rcv buffers when it
839 * gets a disable.
840 */
841 i = init_rcv_bufs(netdev, devdata);
842 if (i < 0) {
843 dev_err(&netdev->dev,
844 "%s failed to init rcv bufs (%d)\n", __func__, i);
 845		return i;
 846	}
847
848 spin_lock_irqsave(&devdata->priv_lock, flags);
849 devdata->enabled = 1;
850
851 /* now we're ready, let's send an ENB to uisnic but until we get
852 * an ACK back from uisnic, we'll drop the packets
853 */
854 devdata->n_rcv_packets_not_accepted = 0;
855 spin_unlock_irqrestore(&devdata->priv_lock, flags);
856
857 /* send enable and wait for ack -- don't hold lock when sending enable
858 * because if the queue is full, insert might sleep.
859 */
860 send_enbdis(netdev, 1, devdata);
861
862 spin_lock_irqsave(&devdata->priv_lock, flags);
863 while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
864 (wait < timeout)) {
865 if (devdata->enab_dis_acked)
866 break;
867 if (devdata->server_down || devdata->server_change_state) {
868 spin_unlock_irqrestore(&devdata->priv_lock, flags);
869 dev_dbg(&netdev->dev, "%s server went away\n",
870 __func__);
871 return -EIO;
872 }
873 set_current_state(TASK_INTERRUPTIBLE);
874 spin_unlock_irqrestore(&devdata->priv_lock, flags);
875 wait += schedule_timeout(msecs_to_jiffies(10));
876 spin_lock_irqsave(&devdata->priv_lock, flags);
877 }
878
879 spin_unlock_irqrestore(&devdata->priv_lock, flags);
880
881 if (!devdata->enab_dis_acked) {
882 dev_err(&netdev->dev, "%s missing ACK\n", __func__);
 883		return -EIO;
 884	}
885
886 /* find an open slot in the array to save off VisorNic references
887 * for debug
888 */
889 for (i = 0; i < VISORNICSOPENMAX; i++) {
890 if (!num_visornic_open[i]) {
891 num_visornic_open[i] = netdev;
892 break;
893 }
894 }
895
896 return 0;
897}
898
899/**
900 * visornic_timeout_reset - handle xmit timeout resets
 901 * @work: work item that scheduled the work
902 *
 903 * Transmit timeouts are typically handled by resetting the
 904 * device. For our virtual NIC we send a disable and enable
 905 * to the IOVM. If it doesn't respond we trigger a serverdown.
906 */
907static void
908visornic_timeout_reset(struct work_struct *work)
909{
910 struct visornic_devdata *devdata;
911 struct net_device *netdev;
912 int response = 0;
913
914 devdata = container_of(work, struct visornic_devdata, timeout_reset);
915 netdev = devdata->netdev;
916
917 netif_stop_queue(netdev);
918 response = visornic_disable_with_timeout(netdev, 100);
919 if (response)
920 goto call_serverdown;
921
922 response = visornic_enable_with_timeout(netdev, 100);
923 if (response)
924 goto call_serverdown;
925 netif_wake_queue(netdev);
926
927 return;
928
929call_serverdown:
 930	visornic_serverdown(devdata, NULL);
931}
932
933/**
934 * visornic_open - Enable the visornic device and mark the queue started
935 * @netdev: netdevice to start
936 *
937 * Enable the device and start the transmit queue.
938 * Return 0 for success
939 */
940static int
941visornic_open(struct net_device *netdev)
942{
943 visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
944
945 /* start the interface's transmit queue, allowing it to accept
946 * packets for transmission
947 */
948 netif_start_queue(netdev);
949
950 return 0;
951}
952
953/**
954 * visornic_close - Disables the visornic device and stops the queues
955 * @netdev: netdevice to start
956 *
957 * Disable the device and stop the transmit queue.
958 * Return 0 for success
959 */
960static int
961visornic_close(struct net_device *netdev)
962{
963 netif_stop_queue(netdev);
964 visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
965
966 return 0;
967}
968
969/**
970 * visornic_xmit - send a packet to the IO Partition
971 * @skb: Packet to be sent
972 * @netdev: net device the packet is being sent from
973 *
 974 * Convert the skb to a cmdrsp so the IO Partition can understand it.
975 * Send the XMIT command to the IO Partition for processing. This
976 * function is protected from concurrent calls by a spinlock xmit_lock
977 * in the net_device struct, but as soon as the function returns it
978 * can be called again.
979 * Returns NETDEV_TX_OK for success, NETDEV_TX_BUSY for error.
980 */
981static int
982visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
983{
984 struct visornic_devdata *devdata;
985 int len, firstfraglen, padlen;
986 struct uiscmdrsp *cmdrsp = NULL;
987 unsigned long flags;
988
989 devdata = netdev_priv(netdev);
990 spin_lock_irqsave(&devdata->priv_lock, flags);
991
992 if (netif_queue_stopped(netdev) || devdata->server_down ||
993 devdata->server_change_state) {
994 spin_unlock_irqrestore(&devdata->priv_lock, flags);
995 devdata->busy_cnt++;
996 dev_dbg(&netdev->dev,
997 "%s busy - queue stopped\n", __func__);
998 return NETDEV_TX_BUSY;
999 }
1000
1001 /* sk_buff struct is used to host network data throughout all the
1002 * linux network subsystems
1003 */
1004 len = skb->len;
1005
1006 /* skb->len is the FULL length of data (including fragmentary portion)
1007 * skb->data_len is the length of the fragment portion in frags
1008 * skb->len - skb->data_len is size of the 1st fragment in skb->data
1009 * calculate the length of the first fragment that skb->data is
1010 * pointing to
1011 */
1012 firstfraglen = skb->len - skb->data_len;
1013 if (firstfraglen < ETH_HEADER_SIZE) {
1014 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1015 devdata->busy_cnt++;
1016 dev_err(&netdev->dev,
1017 "%s busy - first frag too small (%d)\n",
1018 __func__, firstfraglen);
1019 return NETDEV_TX_BUSY;
1020 }
1021
1022 if ((len < ETH_MIN_PACKET_SIZE) &&
1023 ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
1024 /* pad the packet out to minimum size */
1025 padlen = ETH_MIN_PACKET_SIZE - len;
1026 memset(&skb->data[len], 0, padlen);
1027 skb->tail += padlen;
1028 skb->len += padlen;
1029 len += padlen;
1030 firstfraglen += padlen;
1031 }
1032
1033 cmdrsp = devdata->xmit_cmdrsp;
1034 /* clear cmdrsp */
1035 memset(cmdrsp, 0, SIZEOF_CMDRSP);
1036 cmdrsp->net.type = NET_XMIT;
1037 cmdrsp->cmdtype = CMD_NET_TYPE;
1038
1039 /* save the pointer to skb -- we'll need it for completion */
1040 cmdrsp->net.buf = skb;
1041
1042 if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
1043 (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
1044 devdata->max_outstanding_net_xmits)) ||
1045 ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
1046 (ULONG_MAX - devdata->chstat.got_xmit_done +
1047 devdata->chstat.sent_xmit >=
1048 devdata->max_outstanding_net_xmits))) {
1049 /* too many NET_XMITs queued over to IOVM - need to wait
1050 */
1051 devdata->chstat.reject_count++;
1052 if (!devdata->queuefullmsg_logged &&
1053 ((devdata->chstat.reject_count & 0x3ff) == 1))
1054 devdata->queuefullmsg_logged = 1;
1055 netif_stop_queue(netdev);
1056 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1057 devdata->busy_cnt++;
1058 dev_dbg(&netdev->dev,
1059 "%s busy - waiting for iovm to catch up\n",
1060 __func__);
1061 return NETDEV_TX_BUSY;
1062 }
1063 if (devdata->queuefullmsg_logged)
1064 devdata->queuefullmsg_logged = 0;
1065
1066 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1067 cmdrsp->net.xmt.lincsum.valid = 1;
1068 cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
1069 if (skb_transport_header(skb) > skb->data) {
1070 cmdrsp->net.xmt.lincsum.hrawoff =
1071 skb_transport_header(skb) - skb->data;
 1072			cmdrsp->net.xmt.lincsum.hrawoffv = 1;
1073 }
1074 if (skb_network_header(skb) > skb->data) {
1075 cmdrsp->net.xmt.lincsum.nhrawoff =
1076 skb_network_header(skb) - skb->data;
1077 cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
1078 }
1079 cmdrsp->net.xmt.lincsum.csum = skb->csum;
1080 } else {
1081 cmdrsp->net.xmt.lincsum.valid = 0;
1082 }
1083
1084 /* save off the length of the entire data packet */
1085 cmdrsp->net.xmt.len = len;
1086
1087 /* copy ethernet header from first frag into ocmdrsp
1088 * - everything else will be pass in frags & DMA'ed
1089 */
1090 memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
1091 /* copy frags info - from skb->data we need to only provide access
1092 * beyond eth header
1093 */
1094 cmdrsp->net.xmt.num_frags =
1095 visor_copy_fragsinfo_from_skb(skb, firstfraglen,
1096 MAX_PHYS_INFO,
1097 cmdrsp->net.xmt.frags);
1098 if (cmdrsp->net.xmt.num_frags == -1) {
1099 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1100 devdata->busy_cnt++;
1101 dev_err(&netdev->dev,
1102 "%s busy - copy frags failed\n", __func__);
1103 return NETDEV_TX_BUSY;
1104 }
1105
1106 if (!visorchannel_signalinsert(devdata->dev->visorchannel,
1107 IOCHAN_TO_IOPART, cmdrsp)) {
1108 netif_stop_queue(netdev);
1109 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1110 devdata->busy_cnt++;
1111 dev_dbg(&netdev->dev,
1112 "%s busy - signalinsert failed\n", __func__);
1113 return NETDEV_TX_BUSY;
1114 }
1115
1116 /* Track the skbs that have been sent to the IOVM for XMIT */
1117 skb_queue_head(&devdata->xmitbufhead, skb);
1118
1119 /* set the last transmission start time
1120 * linux doc says: Do not forget to update netdev->trans_start to
1121 * jiffies after each new tx packet is given to the hardware.
1122 */
1123 netdev->trans_start = jiffies;
1124
1125 /* update xmt stats */
1126 devdata->net_stats.tx_packets++;
1127 devdata->net_stats.tx_bytes += skb->len;
1128 devdata->chstat.sent_xmit++;
1129
1130 /* check to see if we have hit the high watermark for
1131 * netif_stop_queue()
1132 */
1133 if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
1134 (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
1135 devdata->upper_threshold_net_xmits)) ||
1136 ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
1137 (ULONG_MAX - devdata->chstat.got_xmit_done +
1138 devdata->chstat.sent_xmit >=
1139 devdata->upper_threshold_net_xmits))) {
1140 /* too many NET_XMITs queued over to IOVM - need to wait */
1141 netif_stop_queue(netdev); /* calling stop queue - call
1142 * netif_wake_queue() after lower
1143 * threshold
1144 */
1145 dev_dbg(&netdev->dev,
1146 "%s busy - invoking iovm flow control\n",
1147 __func__);
1148 devdata->flow_control_upper_hits++;
1149 }
1150 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1151
1152 /* skb will be freed when we get back NET_XMIT_DONE */
1153 return NETDEV_TX_OK;
1154}
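/* Illustrative sketch (watermark values assumed): the flow-control tests
 * above compare the number of NET_XMITs still outstanding in the IOVM,
 * roughly sent_xmit - got_xmit_done, against the watermarks; the second
 * clause of each test approximates that difference when sent_xmit has
 * wrapped past ULONG_MAX. For example, with max_outstanding_net_xmits = 64,
 * upper_threshold_net_xmits = 48 and lower_threshold_net_xmits = 32, a send
 * is refused outright once 64 are unacknowledged, the queue is stopped once
 * 48 are unacknowledged, and drain_queue() wakes the queue again when
 * completions bring the backlog back down to 32.
 */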
1155
1156/**
1157 * visornic_get_stats - returns net_stats of the visornic device
1158 * @netdev: netdevice
1159 *
1160 * Returns the net_device_stats for the device
1161 */
1162static struct net_device_stats *
1163visornic_get_stats(struct net_device *netdev)
1164{
1165 struct visornic_devdata *devdata = netdev_priv(netdev);
1166
1167 return &devdata->net_stats;
1168}
1169
1170/**
1171 * visornic_ioctl - ioctl function for netdevice.
1172 * @netdev: netdevice
1173 * @ifr: ignored
1174 * @cmd: ignored
1175 *
1176 * Currently not supported.
 1177 * Returns -EOPNOTSUPP
1178 */
1179static int
1180visornic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1181{
1182 return -EOPNOTSUPP;
1183}
1184
1185/**
1186 * visornic_change_mtu - changes mtu of device.
1187 * @netdev: netdevice
1188 * @new_mtu: value of new mtu
1189 *
1190 * MTU cannot be changed by system, must be changed via
1191 * CONTROLVM message. All vnics and pnics in a switch have
1192 * to have the same MTU for everything to work.
1193 * Currently not supported.
 1194 * Returns -EINVAL
1195 */
1196static int
1197visornic_change_mtu(struct net_device *netdev, int new_mtu)
1198{
1199 return -EINVAL;
1200}
1201
1202/**
 1203 * visornic_set_multi - set the receive mode flags of the device.
1204 * @netdev: netdevice
1205 *
1206 * Only flag we support currently is IFF_PROMISC
1207 * Returns void
1208 */
1209static void
1210visornic_set_multi(struct net_device *netdev)
1211{
1212 struct uiscmdrsp *cmdrsp;
1213 struct visornic_devdata *devdata = netdev_priv(netdev);
1214
1215 /* any filtering changes */
1216 if (devdata->old_flags != netdev->flags) {
1217 if ((netdev->flags & IFF_PROMISC) !=
1218 (devdata->old_flags & IFF_PROMISC)) {
1219 cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1220 if (!cmdrsp)
1221 return;
1222 cmdrsp->cmdtype = CMD_NET_TYPE;
1223 cmdrsp->net.type = NET_RCV_PROMISC;
1224 cmdrsp->net.enbdis.context = netdev;
1225 cmdrsp->net.enbdis.enable =
1226 (netdev->flags & IFF_PROMISC);
1227 visorchannel_signalinsert(devdata->dev->visorchannel,
1228 IOCHAN_TO_IOPART,
1229 cmdrsp);
1230 kfree(cmdrsp);
1231 }
1232 devdata->old_flags = netdev->flags;
1233 }
1234}
1235
1236/**
1237 * visornic_xmit_timeout - request to timeout the xmit
 1238 * @netdev: netdevice whose transmit timed out
1239 *
1240 * Queue the work and return. Make sure we have not already
1241 * been informed the IO Partition is gone, if it is gone
1242 * we will already timeout the xmits.
1243 */
1244static void
1245visornic_xmit_timeout(struct net_device *netdev)
1246{
1247 struct visornic_devdata *devdata = netdev_priv(netdev);
1248 unsigned long flags;
1249
1250 spin_lock_irqsave(&devdata->priv_lock, flags);
1251 if (devdata->going_away) {
1252 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1253 dev_dbg(&devdata->dev->device,
1254 "%s aborting because device removal pending\n",
1255 __func__);
1256 return;
1257 }
1258
1259 /* Ensure that a ServerDown message hasn't been received */
1260 if (!devdata->enabled ||
1261 (devdata->server_down && !devdata->server_change_state)) {
1262 dev_dbg(&netdev->dev, "%s no processing\n",
1263 __func__);
1264 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1265 return;
1266 }
 1267	queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
 1268	spin_unlock_irqrestore(&devdata->priv_lock, flags);
1269}
1270
1271/**
1272 * repost_return - repost rcv bufs that have come back
1273 * @cmdrsp: io channel command struct to post
1274 * @devdata: visornic devdata for the device
1275 * @skb: skb
1276 * @netdev: netdevice
1277 *
1278 * Repost rcv buffers that have been returned to us when
1279 * we are finished with them.
 1280 * Returns 0 for success, negative for error.
1281 */
1282static inline int
1283repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1284 struct sk_buff *skb, struct net_device *netdev)
1285{
1286 struct net_pkt_rcv copy;
1287 int i = 0, cc, numreposted;
1288 int found_skb = 0;
1289 int status = 0;
1290
1291 copy = cmdrsp->net.rcv;
1292 switch (copy.numrcvbufs) {
1293 case 0:
1294 devdata->n_rcv0++;
1295 break;
1296 case 1:
1297 devdata->n_rcv1++;
1298 break;
1299 case 2:
1300 devdata->n_rcv2++;
1301 break;
1302 default:
1303 devdata->n_rcvx++;
1304 break;
1305 }
1306 for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
1307 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1308 if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
1309 continue;
1310
1311 if ((skb) && devdata->rcvbuf[i] == skb) {
1312 devdata->found_repost_rcvbuf_cnt++;
1313 found_skb = 1;
1314 devdata->repost_found_skb_cnt++;
1315 }
1316 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1317 if (!devdata->rcvbuf[i]) {
1318 devdata->num_rcv_bufs_could_not_alloc++;
1319 devdata->alloc_failed_in_repost_rtn_cnt++;
1320 status = -ENOMEM;
1321 break;
1322 }
1323 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1324 numreposted++;
1325 break;
1326 }
1327 }
1328 if (numreposted != copy.numrcvbufs) {
1329 devdata->n_repost_deficit++;
1330 status = -EINVAL;
1331 }
1332 if (skb) {
1333 if (found_skb) {
1334 kfree_skb(skb);
1335 } else {
1336 status = -EINVAL;
1337 devdata->bad_rcv_buf++;
1338 }
1339 }
1340 atomic_dec(&devdata->usage);
1341 return status;
1342}
1343
1344/**
1345 * visornic_rx - Handle receive packets coming back from IO Part
1346 * @cmdrsp: Receive packet returned from IO Part
1347 *
1348 * Got a receive packet back from the IO Part, handle it and send
1349 * it up the stack.
1350 * Returns void
1351 */
1352static void
1353visornic_rx(struct uiscmdrsp *cmdrsp)
1354{
1355 struct visornic_devdata *devdata;
1356 struct sk_buff *skb, *prev, *curr;
1357 struct net_device *netdev;
1358 int cc, currsize, off, status;
1359 struct ethhdr *eth;
1360 unsigned long flags;
1361#ifdef DEBUG
1362 struct phys_info testfrags[MAX_PHYS_INFO];
1363#endif
1364
1365 /* post new rcv buf to the other end using the cmdrsp we have at hand
1366 * post it without holding lock - but we'll use the signal lock to
1367 * synchronize the queue insert the cmdrsp that contains the net.rcv
1368 * is the one we are using to repost, so copy the info we need from it.
1369 */
1370 skb = cmdrsp->net.buf;
1371 netdev = skb->dev;
1372
1373 if (!netdev) {
1374 /* We must have previously downed this network device and
1375 * this skb and device is no longer valid. This also means
1376 * the skb reference was removed from devdata->rcvbuf so no
1377 * need to search for it.
1378 * All we can do is free the skb and return.
1379 * Note: We crash if we try to log this here.
1380 */
1381 kfree_skb(skb);
1382 return;
1383 }
1384
1385 devdata = netdev_priv(netdev);
1386
1387 spin_lock_irqsave(&devdata->priv_lock, flags);
1388 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1389
1390 /* update rcv stats - call it with priv_lock held */
1391 devdata->net_stats.rx_packets++;
 1392	devdata->net_stats.rx_bytes += skb->len;
1393
1394 atomic_inc(&devdata->usage); /* don't want a close to happen before
1395 * we're done here
1396 */
1397
1398 /* set length to how much was ACTUALLY received -
1399 * NOTE: rcv_done_len includes actual length of data rcvd
1400 * including ethhdr
1401 */
1402 skb->len = cmdrsp->net.rcv.rcv_done_len;
1403
1404 /* test enabled while holding lock */
1405 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1406 /* don't process it unless we're in enable mode and until
1407 * we've gotten an ACK saying the other end got our RCV enable
1408 */
1409 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1410 repost_return(cmdrsp, devdata, skb, netdev);
1411 return;
1412 }
1413
1414 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1415
1416 /* when skb was allocated, skb->dev, skb->data, skb->len and
1417 * skb->data_len were setup. AND, data has already put into the
1418 * skb (both first frag and in frags pages)
1419 * NOTE: firstfragslen is the amount of data in skb->data and that
1420 * which is not in nr_frags or frag_list. This is now simply
1421 * RCVPOST_BUF_SIZE. bump tail to show how much data is in
1422 * firstfrag & set data_len to show rest see if we have to chain
1423 * frag_list.
1424 */
1425 if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */
1426 if (cmdrsp->net.rcv.numrcvbufs < 2) {
1427 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1428 dev_err(&devdata->netdev->dev,
1429 "repost_return failed");
1430 return;
1431 }
1432 /* length rcvd is greater than firstfrag in this skb rcv buf */
1433 skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
1434 skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
1435 will be in
1436 frag_list */
1437 } else {
1438 /* data fits in this skb - no chaining - do
1439 * PRECAUTIONARY check
1440 */
1441 if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */
1442 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1443 dev_err(&devdata->netdev->dev,
1444 "repost_return failed");
1445 return;
1446 }
1447 skb->tail += skb->len;
1448 skb->data_len = 0; /* nothing rcvd in frag_list */
1449 }
1450 off = skb_tail_pointer(skb) - skb->data;
1451
1452 /* amount we bumped tail by in the head skb
1453 * it is used to calculate the size of each chained skb below
1454 * it is also used to index into bufline to continue the copy
1455 * (for chansocktwopc)
1456 * if necessary chain the rcv skbs together.
1457 * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
1458 * chain the rest to that one.
1459 * - do PRECAUTIONARY check
1460 */
1461 if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
1462 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1463 dev_err(&devdata->netdev->dev, "repost_return failed");
1464 return;
1465 }
1466
1467 if (cmdrsp->net.rcv.numrcvbufs > 1) {
1468 /* chain the various rcv buffers into the skb's frag_list. */
1469 /* Note: off was initialized above */
1470 for (cc = 1, prev = NULL;
1471 cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
1472 curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
1473 curr->next = NULL;
1474 if (!prev) /* start of list- set head */
1475 skb_shinfo(skb)->frag_list = curr;
1476 else
1477 prev->next = curr;
1478 prev = curr;
1479
1480 /* should we set skb->len and skb->data_len for each
1481 * buffer being chained??? can't hurt!
1482 */
1483 currsize = min(skb->len - off,
1484 (unsigned int)RCVPOST_BUF_SIZE);
1485 curr->len = currsize;
1486 curr->tail += currsize;
1487 curr->data_len = 0;
1488 off += currsize;
1489 }
1490#ifdef DEBUG
1491 /* assert skb->len == off */
1492 if (skb->len != off) {
1493 dev_err(&devdata->netdev->dev,
1494 "%s something wrong; skb->len:%d != off:%d\n",
1495 netdev->name, skb->len, off);
1496 }
1497 /* test code */
1498 cc = util_copy_fragsinfo_from_skb("rcvchaintest", skb,
1499 RCVPOST_BUF_SIZE,
1500 MAX_PHYS_INFO, testfrags);
1501 if (cc != cmdrsp->net.rcv.numrcvbufs) {
1502 dev_err(&devdata->netdev->dev,
1503 "**** %s Something wrong; rcvd chain length %d different from one we calculated %d\n",
1504 netdev->name, cmdrsp->net.rcv.numrcvbufs, cc);
1505 }
1506 for (i = 0; i < cc; i++) {
 1507			dev_info(&devdata->netdev->dev,
1508 "test:RCVPOST_BUF_SIZE:%d[%d] pfn:%llu off:0x%x len:%d\n",
1509 RCVPOST_BUF_SIZE, i, testfrags[i].pi_pfn,
1510 testfrags[i].pi_off, testfrags[i].pi_len);
1511 }
1512#endif
1513 }
1514
 1515	/* set up packet's protocol type using ethernet header - this
1516 * sets up skb->pkt_type & it also PULLS out the eth header
1517 */
1518 skb->protocol = eth_type_trans(skb, netdev);
1519
1520 eth = eth_hdr(skb);
1521
1522 skb->csum = 0;
1523 skb->ip_summed = CHECKSUM_NONE;
1524
1525 do {
1526 if (netdev->flags & IFF_PROMISC)
1527 break; /* accept all packets */
1528 if (skb->pkt_type == PACKET_BROADCAST) {
1529 if (netdev->flags & IFF_BROADCAST)
1530 break; /* accept all broadcast packets */
1531 } else if (skb->pkt_type == PACKET_MULTICAST) {
1532 if ((netdev->flags & IFF_MULTICAST) &&
1533 (netdev_mc_count(netdev))) {
1534 struct netdev_hw_addr *ha;
1535 int found_mc = 0;
1536
1537 /* only accept multicast packets that we can
1538 * find in our multicast address list
1539 */
1540 netdev_for_each_mc_addr(ha, netdev) {
1541 if (ether_addr_equal(eth->h_dest,
1542 ha->addr)) {
1543 found_mc = 1;
1544 break;
1545 }
1546 }
1547 if (found_mc)
1548 break; /* accept packet, dest
1549 matches a multicast
1550 address */
1551 }
1552 } else if (skb->pkt_type == PACKET_HOST) {
1553 break; /* accept packet, h_dest must match vnic
1554 mac address */
1555 } else if (skb->pkt_type == PACKET_OTHERHOST) {
1556 /* something is not right */
1557 dev_err(&devdata->netdev->dev,
1558 "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
1559 netdev->name, eth->h_dest, netdev->dev_addr);
1560 }
1561 /* drop packet - don't forward it up to OS */
1562 devdata->n_rcv_packets_not_accepted++;
1563 repost_return(cmdrsp, devdata, skb, netdev);
1564 return;
1565 } while (0);
1566
1567 status = netif_rx(skb);
1568 /* netif_rx returns various values, but "in practice most drivers
1569 * ignore the return value
1570 */
1571
1572 skb = NULL;
1573 /*
1574 * whether the packet got dropped or handled, the skb is freed by
1575 * kernel code, so we shouldn't free it. but we should repost a
1576 * new rcv buffer.
1577 */
1578 repost_return(cmdrsp, devdata, skb, netdev);
1579}
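/* Worked example of the chaining above (RCVPOST_BUF_SIZE assumed to be 4096
 * for illustration): a 9000-byte receive arrives spread over numrcvbufs = 3
 * posted buffers. The head skb keeps the first 4096 bytes (tail is bumped by
 * RCVPOST_BUF_SIZE and data_len becomes 4904), and the loop chains the
 * remaining two buffers onto frag_list with lengths 4096 and 808.
 */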
1580
1581/**
1582 * devdata_initialize - Initialize devdata structure
1583 * @devdata: visornic_devdata structure to initialize
 1584 * @dev: visor_device it belongs to
1585 *
1586 * Setup initial values for the visornic based on channel and default
1587 * values.
1588 * Returns a pointer to the devdata if successful, else NULL
1589 */
1590static struct visornic_devdata *
1591devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
1592{
1593 int devnum = -1;
1594
1595 if (!devdata)
1596 return NULL;
1597 memset(devdata, '\0', sizeof(struct visornic_devdata));
1598 spin_lock(&dev_num_pool_lock);
1599 devnum = find_first_zero_bit(dev_num_pool, MAXDEVICES);
1600 set_bit(devnum, dev_num_pool);
1601 spin_unlock(&dev_num_pool_lock);
1602 if (devnum == MAXDEVICES)
1603 devnum = -1;
 1604	if (devnum < 0)
 1605		return NULL;
1606 devdata->devnum = devnum;
1607 devdata->dev = dev;
1608 strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
1609 spin_lock(&lock_all_devices);
1610 list_add_tail(&devdata->list_all, &list_all_devices);
1611 spin_unlock(&lock_all_devices);
1612 return devdata;
1613}
1614
1615/**
1616 * devdata_release - Frees up references in devdata
1617 * @devdata: struct to clean up
 1618 *
 1619 * Frees up references in devdata.
1620 * Returns void
1621 */
 1622static void devdata_release(struct visornic_devdata *devdata)
 1623{
1624 spin_lock(&dev_num_pool_lock);
1625 clear_bit(devdata->devnum, dev_num_pool);
1626 spin_unlock(&dev_num_pool_lock);
1627 spin_lock(&lock_all_devices);
1628 list_del(&devdata->list_all);
1629 spin_unlock(&lock_all_devices);
1630 kfree(devdata->rcvbuf);
1631 kfree(devdata->cmdrsp_rcv);
1632 kfree(devdata->xmit_cmdrsp);
1633}
1634
1635static const struct net_device_ops visornic_dev_ops = {
1636 .ndo_open = visornic_open,
1637 .ndo_stop = visornic_close,
1638 .ndo_start_xmit = visornic_xmit,
1639 .ndo_get_stats = visornic_get_stats,
1640 .ndo_do_ioctl = visornic_ioctl,
1641 .ndo_change_mtu = visornic_change_mtu,
1642 .ndo_tx_timeout = visornic_xmit_timeout,
1643 .ndo_set_rx_mode = visornic_set_multi,
1644};
1645
1646/**
1647 * send_rcv_posts_if_needed
1648 * @devdata: visornic device
1649 *
1650 * Send receive buffers to the IO Partition.
1651 * Returns void
1652 */
1653static void
1654send_rcv_posts_if_needed(struct visornic_devdata *devdata)
1655{
1656 int i;
1657 struct net_device *netdev;
1658 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1659 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
1660
1661 /* don't do this until vnic is marked ready */
1662 if (!(devdata->enabled && devdata->enab_dis_acked))
1663 return;
1664
1665 netdev = devdata->netdev;
1666 rcv_bufs_allocated = 0;
1667 /* this code is trying to prevent getting stuck here forever,
 1669	 * but still retry if we can't allocate them all this time.
1669 */
1670 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1671 while (cur_num_rcv_bufs_to_alloc > 0) {
1672 cur_num_rcv_bufs_to_alloc--;
1673 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1674 if (devdata->rcvbuf[i])
1675 continue;
1676 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1677 if (!devdata->rcvbuf[i]) {
1678 devdata->alloc_failed_in_if_needed_cnt++;
1679 break;
1680 }
1681 rcv_bufs_allocated++;
1682 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1683 devdata->chstat.extra_rcvbufs_sent++;
1684 }
1685 }
1686 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
1687}
1688
1689/**
 1690 * drain_queue - drains the response queue
 1691 * @cmdrsp: io channel command response message
 1692 * @devdata: visornic device to drain
 1693 *
 1694 * Drain the response queue of any responses from the IO partition.
 1695 * Process the responses as we get them.
 1696 * Returns when the response queue is empty or when the thread stops.
1697 */
1698static void
1699drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
1700{
1701 unsigned long flags;
1702 struct net_device *netdev;
1703
1704 /* drain queue */
1705 while (1) {
1706 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1707 * moment */
1708 if (!visorchannel_signalremove(devdata->dev->visorchannel,
1709 IOCHAN_FROM_IOPART,
1710 cmdrsp))
1711 break; /* queue empty */
1712
1713 switch (cmdrsp->net.type) {
1714 case NET_RCV:
1715 devdata->chstat.got_rcv++;
1716 /* process incoming packet */
1717 visornic_rx(cmdrsp);
1718 break;
1719 case NET_XMIT_DONE:
1720 spin_lock_irqsave(&devdata->priv_lock, flags);
1721 devdata->chstat.got_xmit_done++;
1722 if (cmdrsp->net.xmtdone.xmt_done_result)
1723 devdata->chstat.xmit_fail++;
1724 /* only call queue wake if we stopped it */
1725 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1726 /* ASSERT netdev == vnicinfo->netdev; */
1727 if ((netdev == devdata->netdev) &&
1728 netif_queue_stopped(netdev)) {
1729 /* check to see if we have crossed
1730 * the lower watermark for
1731 * netif_wake_queue()
1732 */
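				/* The second clause below covers the case
				 * where sent_xmit has wrapped past ULONG_MAX
				 * while got_xmit_done has not, so the count
				 * of outstanding xmits is computed modulo
				 * ULONG_MAX.
				 */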
				if (((devdata->chstat.sent_xmit >=
				      devdata->chstat.got_xmit_done) &&
				     (devdata->chstat.sent_xmit -
				      devdata->chstat.got_xmit_done <=
				      devdata->lower_threshold_net_xmits)) ||
				    ((devdata->chstat.sent_xmit <
				      devdata->chstat.got_xmit_done) &&
				     (ULONG_MAX - devdata->chstat.got_xmit_done
				      + devdata->chstat.sent_xmit <=
				      devdata->lower_threshold_net_xmits))) {
					/* enough NET_XMITs completed
					 * so can restart netif queue
					 */
					netif_wake_queue(netdev);
					devdata->flow_control_lower_hits++;
				}
			}
			skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
			spin_unlock_irqrestore(&devdata->priv_lock, flags);
			kfree_skb(cmdrsp->net.buf);
			break;
		case NET_RCV_ENBDIS_ACK:
			devdata->chstat.got_enbdisack++;
			netdev = (struct net_device *)
				 cmdrsp->net.enbdis.context;
			spin_lock_irqsave(&devdata->priv_lock, flags);
			devdata->enab_dis_acked = 1;
			spin_unlock_irqrestore(&devdata->priv_lock, flags);

			if (devdata->server_down &&
			    devdata->server_change_state) {
				/* Inform Linux that the link is up */
				devdata->server_down = false;
				devdata->server_change_state = false;
				netif_wake_queue(netdev);
				netif_carrier_on(netdev);
			}
			break;
		case NET_CONNECT_STATUS:
			netdev = devdata->netdev;
			if (cmdrsp->net.enbdis.enable == 1) {
				spin_lock_irqsave(&devdata->priv_lock, flags);
				devdata->enabled = cmdrsp->net.enbdis.enable;
				spin_unlock_irqrestore(&devdata->priv_lock,
						       flags);
				netif_wake_queue(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_stop_queue(netdev);
				netif_carrier_off(netdev);
				spin_lock_irqsave(&devdata->priv_lock, flags);
				devdata->enabled = cmdrsp->net.enbdis.enable;
				spin_unlock_irqrestore(&devdata->priv_lock,
						       flags);
			}
			break;
		default:
			break;
		}
		/* cmdrsp is now available for reuse */

		if (kthread_should_stop())
			break;
	}
}

/**
 * process_incoming_rsps - Checks the status of the response queue.
 * @v: void pointer to the visornic devdata
 *
 * Main function of the vnic_incoming thread. Periodically check the
 * response queue and drain it if needed.
 * Returns when thread has stopped.
 */
static int
process_incoming_rsps(void *v)
{
	struct visornic_devdata *devdata = v;
	struct uiscmdrsp *cmdrsp = NULL;
	const int SZ = SIZEOF_CMDRSP;

	cmdrsp = kmalloc(SZ, GFP_ATOMIC);
	if (!cmdrsp)
		complete_and_exit(&devdata->threadinfo.has_stopped, 0);

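	/* Poll loop: wake when interrupt_rcvd is set or, failing that, at
	 * least every thread_wait_ms milliseconds; then repost any receive
	 * buffers that previously failed to allocate and drain the response
	 * queue.
	 */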
	while (1) {
		wait_event_interruptible_timeout(
			devdata->rsp_queue, (atomic_read(
					     &devdata->interrupt_rcvd) == 1),
			msecs_to_jiffies(devdata->thread_wait_ms));

		/* periodically check to see if there are any rcv bufs which
		 * need to get sent to the IOSP. This can only happen if
		 * we run out of memory when trying to allocate skbs.
		 */
		atomic_set(&devdata->interrupt_rcvd, 0);
		send_rcv_posts_if_needed(devdata);
		drain_queue(cmdrsp, devdata);
		if (kthread_should_stop())
			break;
	}

	kfree(cmdrsp);
	complete_and_exit(&devdata->threadinfo.has_stopped, 0);
}

/**
 * visornic_probe - probe function for visornic devices
 * @dev: The visor device discovered
 *
 * Called when visorbus discovers a visornic device on its
 * bus. It creates a new visornic ethernet adapter.
 * Returns 0 or negative for error.
 */
static int visornic_probe(struct visor_device *dev)
{
	struct visornic_devdata *devdata = NULL;
	struct net_device *netdev = NULL;
	int err;
	int channel_offset = 0;
	u64 features;

	netdev = alloc_etherdev(sizeof(struct visornic_devdata));
	if (!netdev) {
		dev_err(&dev->device,
			"%s alloc_etherdev failed\n", __func__);
		return -ENOMEM;
	}

	netdev->netdev_ops = &visornic_dev_ops;
	netdev->watchdog_timeo = (5 * HZ);
	SET_NETDEV_DEV(netdev, &dev->device);

	/* Get MAC address from channel and read it into the device. */
	netdev->addr_len = ETH_ALEN;
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.macaddr);
	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
				    ETH_ALEN);
	if (err < 0) {
		dev_err(&dev->device,
			"%s failed to get mac addr from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata = devdata_initialize(netdev_priv(netdev), dev);
	if (!devdata) {
		dev_err(&dev->device,
			"%s devdata_initialize failed\n", __func__);
		err = -ENOMEM;
		goto cleanup_netdev;
	}

	devdata->netdev = netdev;
	dev_set_drvdata(&dev->device, devdata);
	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->priv_lock);
	devdata->enabled = 0; /* not yet */
	atomic_set(&devdata->usage, 1);

	/* Setup rcv bufs */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.num_rcv_bufs);
	err = visorbus_read_channel(dev, channel_offset,
				    &devdata->num_rcv_bufs, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get #rcv bufs from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata->rcvbuf = kzalloc(sizeof(struct sk_buff *) *
				  devdata->num_rcv_bufs, GFP_KERNEL);
	if (!devdata->rcvbuf) {
		err = -ENOMEM;
		goto cleanup_rcvbuf;
	}

	/* set the net_xmit outstanding threshold */
	/* always leave two slots open but you should have 3 at a minimum */
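	/* for example, with num_rcv_bufs == 64 this works out to
	 * max_outstanding_net_xmits = 19, upper threshold = 18 and
	 * lower threshold = 9
	 */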
	devdata->max_outstanding_net_xmits =
		max(3, ((devdata->num_rcv_bufs / 3) - 2));
	devdata->upper_threshold_net_xmits =
		max(2, devdata->max_outstanding_net_xmits - 1);
	devdata->lower_threshold_net_xmits =
		max(1, devdata->max_outstanding_net_xmits / 2);

	skb_queue_head_init(&devdata->xmitbufhead);

	/* create a cmdrsp we can use to post and unpost rcv buffers */
	devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->cmdrsp_rcv) {
		err = -ENOMEM;
		goto cleanup_cmdrsp_rcv;
	}
	devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->xmit_cmdrsp) {
		err = -ENOMEM;
		goto cleanup_xmit_cmdrsp;
	}
	INIT_WORK(&devdata->serverdown_completion,
		  visornic_serverdown_complete);
	INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
	devdata->server_down = false;
	devdata->server_change_state = false;

	/* set the default mtu */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.mtu);
	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get mtu from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	/* TODO: Setup Interrupt information */
	/* Let's start our threads to get responses */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get features from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

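	/* Interrupts are not wired up yet (see TODO above), so advertise
	 * the channel as polling-mode; responses are picked up by the
	 * vnic_incoming thread started below.
	 */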
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to set features in chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(&dev->device,
			"%s register_netdev failed (%d)\n", __func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	/* create debugfs directories */
	devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
						      visornic_debugfs_dir);
	if (!devdata->eth_debugfs_dir) {
		dev_err(&dev->device,
			"%s debugfs_create_dir %s failed\n",
			__func__, netdev->name);
		err = -ENOMEM;
		goto cleanup_register_netdev;
	}

	devdata->thread_wait_ms = 2;
	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
			   devdata, "vnic_incoming");

	dev_info(&dev->device, "%s success netdev=%s\n",
		 __func__, netdev->name);
	return 0;

cleanup_register_netdev:
	unregister_netdev(netdev);

cleanup_xmit_cmdrsp:
	kfree(devdata->xmit_cmdrsp);

cleanup_cmdrsp_rcv:
	kfree(devdata->cmdrsp_rcv);

cleanup_rcvbuf:
	kfree(devdata->rcvbuf);

cleanup_netdev:
	free_netdev(netdev);
	return err;
}

/**
 * host_side_disappeared - IO part is gone.
 * @devdata: device object
 *
 * IO partition servicing this device is gone, do cleanup
 * Returns void.
 */
static void host_side_disappeared(struct visornic_devdata *devdata)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	sprintf(devdata->name, "<dev#%d-history>", devdata->devnum);
	devdata->dev = NULL;   /* indicate device destroyed */
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/**
 * visornic_remove - Called when visornic dev goes away
 * @dev: visornic device that is being removed
 *
 * Called when DEVICE_DESTROY gets called to remove device.
 * Returns void
 */
static void visornic_remove(struct visor_device *dev)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
	struct net_device *netdev;
	unsigned long flags;

	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return;
	}
	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->going_away) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s already being removed\n", __func__);
		return;
	}
	devdata->going_away = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
	netdev = devdata->netdev;
	if (!netdev) {
		dev_err(&dev->device, "%s no net device\n", __func__);
		return;
	}

	/* going_away prevents new items being added to the workqueues */
	flush_workqueue(visornic_serverdown_workqueue);
	flush_workqueue(visornic_timeout_reset_workqueue);

	debugfs_remove_recursive(devdata->eth_debugfs_dir);

	unregister_netdev(netdev);  /* this will call visornic_close() */

	/* this had to wait until last because visornic_close() /
	 * visornic_disable_with_timeout() polls waiting for state that is
	 * only updated by the thread
	 */
	if (devdata->threadinfo.id) {
		visor_thread_stop(&devdata->threadinfo);
		if (devdata->threadinfo.id) {
			dev_err(&dev->device, "%s cannot stop worker thread\n",
				__func__);
			return;
		}
	}

	dev_set_drvdata(&dev->device, NULL);
	host_side_disappeared(devdata);
	devdata_release(devdata);
	free_netdev(netdev);
}

/**
 * visornic_pause - Called when IO Part disappears
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished.
 *
 * Called when the IO Partition has gone down. Need to free
 * up resources and wait for IO partition to come back. Mark
 * link as down and don't attempt any DMA. When we have freed
 * memory call the complete_func so that Command knows we are
 * done. If we don't call complete_func, IO part will never
 * come back.
 * Returns 0 for success.
 */
static int visornic_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);

	visornic_serverdown(devdata, complete_func);
	return 0;
}

/**
 * visornic_resume - Called when IO part has recovered
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO partition has recovered. Reestablish
 * connection to the IO part and set the link up. Okay to do
 * DMA again.
 * Returns 0 for success.
 */
static int visornic_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata;
	struct net_device *netdev;
	unsigned long flags;

	devdata = dev_get_drvdata(&dev->device);
	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return -EINVAL;
	}

	netdev = devdata->netdev;

	if (devdata->server_down && !devdata->server_change_state) {
		devdata->server_change_state = true;
		/* Must transition channel to ATTACHED state BEFORE
		 * we can start using the device again.
		 * TODO: State transitions
		 */
		visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
				   devdata, "vnic_incoming");
		init_rcv_bufs(netdev, devdata);
		spin_lock_irqsave(&devdata->priv_lock, flags);
		devdata->enabled = 1;

		/* Now we're ready, let's send an ENB to uisnic but until
		 * we get an ACK back from uisnic, we'll drop the packets
		 */
		devdata->enab_dis_acked = 0;
		spin_unlock_irqrestore(&devdata->priv_lock, flags);

		/* send enable and wait for ack - don't hold lock when
		 * sending enable because if the queue is full, insert
		 * might sleep.
		 */
		send_enbdis(netdev, 1, devdata);
	} else if (devdata->server_change_state) {
		dev_err(&dev->device, "%s server_change_state\n",
			__func__);
		return -EIO;
	}

	complete_func(dev, 0);
	return 0;
}

/**
 * visornic_init - Init function
 *
 * Init function for the visornic driver. Do initial driver setup
 * and wait for devices.
 * Returns 0 for success, negative for error.
 */
static int visornic_init(void)
{
	struct dentry *ret;
	int err = -ENOMEM;

	visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
	if (!visornic_debugfs_dir)
		return err;

	ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
				  &debugfs_info_fops);
	if (!ret)
		goto cleanup_debugfs;
	ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
				  NULL, &debugfs_enable_ints_fops);
	if (!ret)
		goto cleanup_debugfs;

	/* create workqueue for serverdown completion */
	visornic_serverdown_workqueue =
		create_singlethread_workqueue("visornic_serverdown");
	if (!visornic_serverdown_workqueue)
		goto cleanup_debugfs;

	/* create workqueue for tx timeout reset */
	visornic_timeout_reset_workqueue =
		create_singlethread_workqueue("visornic_timeout_reset");
	if (!visornic_timeout_reset_workqueue)
		goto cleanup_workqueue;

	spin_lock_init(&dev_num_pool_lock);
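	/* dev_num_pool is a bitmap with one bit per possible device number */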
	dev_num_pool = kcalloc(BITS_TO_LONGS(MAXDEVICES),
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev_num_pool)
		goto cleanup_workqueue;

	visorbus_register_visor_driver(&visornic_driver);
	return 0;

cleanup_workqueue:
	flush_workqueue(visornic_serverdown_workqueue);
	destroy_workqueue(visornic_serverdown_workqueue);
	if (visornic_timeout_reset_workqueue) {
		flush_workqueue(visornic_timeout_reset_workqueue);
		destroy_workqueue(visornic_timeout_reset_workqueue);
	}
cleanup_debugfs:
	debugfs_remove_recursive(visornic_debugfs_dir);

	return err;
}

/**
 * visornic_cleanup - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visornic_cleanup(void)
{
	visorbus_unregister_visor_driver(&visornic_driver);

	if (visornic_serverdown_workqueue) {
		flush_workqueue(visornic_serverdown_workqueue);
		destroy_workqueue(visornic_serverdown_workqueue);
	}
	if (visornic_timeout_reset_workqueue) {
		flush_workqueue(visornic_timeout_reset_workqueue);
		destroy_workqueue(visornic_timeout_reset_workqueue);
	}
	debugfs_remove_recursive(visornic_debugfs_dir);

	kfree(dev_num_pool);
	dev_num_pool = NULL;
}

module_init(visornic_init);
module_exit(visornic_cleanup);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
MODULE_VERSION("1.0.0.0");