1/*
2 *
3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
15 *
16 */
17
18/*
19 *
20 * Filename:
21 *
22 * dgrp_net_ops.c
23 *
24 * Description:
25 *
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
28 * in "/proc".
29 *
30 * Author:
31 *
32 * James A. Puzzo
33 *
34 */
35
36#include <linux/module.h>
37#include <linux/proc_fs.h>
38#include <linux/types.h>
39#include <linux/string.h>
40#include <linux/tty.h>
41#include <linux/tty_flip.h>
42#include <linux/spinlock.h>
43#include <linux/poll.h>
44#include <linux/sched.h>
45#include <linux/ratelimit.h>
46#include <asm/unaligned.h>
47
48#define MYFLIPLEN TBUF_MAX
49
50#include "dgrp_common.h"
51
52#define TTY_FLIPBUF_SIZE 512
53#define DEVICE_NAME_SIZE 50
54
55/*
56 * Generic helper function declarations
57 */
58static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
59 unsigned char *fbuf, int *len);
60
61/*
62 * File operation declarations
63 */
64static int dgrp_net_open(struct inode *, struct file *);
65static int dgrp_net_release(struct inode *, struct file *);
66static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
67static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
68 loff_t *);
69static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
70 unsigned long arg);
71static unsigned int dgrp_net_select(struct file *file,
72 struct poll_table_struct *table);
73
74static const struct file_operations net_ops = {
75 .owner = THIS_MODULE,
76 .read = dgrp_net_read,
77 .write = dgrp_net_write,
78 .poll = dgrp_net_select,
79 .unlocked_ioctl = dgrp_net_ioctl,
80 .open = dgrp_net_open,
81 .release = dgrp_net_release,
82};
83
84static struct inode_operations net_inode_ops = {
85 .permission = dgrp_inode_permission
86};
87
88void dgrp_register_net_hook(struct proc_dir_entry *de)
89{
90 struct nd_struct *node = de->data;
91
92 de->proc_iops = &net_inode_ops;
93 de->proc_fops = &net_ops;
94 node->nd_net_de = de;
95 sema_init(&node->nd_net_semaphore, 1);
96 node->nd_state = NS_CLOSED;
97 dgrp_create_node_class_sysfs_files(node);
98}
99
100
101/**
102 * dgrp_dump() -- prints memory for debugging purposes.
103 * @mem: Memory location which should be printed to the console
104 * @len: Number of bytes to be dumped
105 */
106static void dgrp_dump(u8 *mem, int len)
107{
108 int i;
109
110 pr_debug("dgrp dump length = %d, data = ", len);
111 for (i = 0; i < len; ++i)
112 pr_debug("%.2x ", mem[i]);
113 pr_debug("\n");
114}
115
116/**
117 * dgrp_read_data_block() -- Read a data block
118 * @ch: struct ch_struct *
119 * @flipbuf: u8 *
120 * @flipbuf_size: size of flipbuf
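 *
 * Copies flipbuf_size bytes out of the channel's circular receive
 * buffer (ch_rbuf) into flipbuf, wrapping at RBUF_MAX and advancing
 * the read index ch_rout.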
121 */
122static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
123 int flipbuf_size)
124{
125 int t;
126 int n;
127
128 if (flipbuf_size <= 0)
129 return;
130
131 t = RBUF_MAX - ch->ch_rout;
132 n = flipbuf_size;
133
134 if (n >= t) {
135 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t);
136 flipbuf += t;
137 n -= t;
138 ch->ch_rout = 0;
139 }
140
141 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
142 flipbuf += n;
143 ch->ch_rout += n;
144}
145
146
147/**
148 * dgrp_input() -- send data to the line discipline
149 * @ch: pointer to channel struct
150 *
151 * Copies the rbuf to the flipbuf and sends the input buffer
152 * data to the line discipline.
153 *
154 */
155static void dgrp_input(struct ch_struct *ch)
156{
157 struct nd_struct *nd;
158 struct tty_struct *tty;
159 int data_len;
160 int len;
161 int tty_count;
162 ulong lock_flags;
163 u8 *myflipbuf;
164 u8 *myflipflagbuf;
165
166 if (!ch)
167 return;
168
169 nd = ch->ch_nd;
170
171 if (!nd)
172 return;
173
174 spin_lock_irqsave(&nd->nd_lock, lock_flags);
175
176 myflipbuf = nd->nd_inputbuf;
177 myflipflagbuf = nd->nd_inputflagbuf;
178
179 if (!ch->ch_open_count) {
180 ch->ch_rout = ch->ch_rin;
181 goto out;
182 }
183
184 if (ch->ch_tun.un_flag & UN_CLOSING) {
185 ch->ch_rout = ch->ch_rin;
186 goto out;
187 }
188
189 tty = (ch->ch_tun).un_tty;
190
191
192 if (!tty || tty->magic != TTY_MAGIC) {
193 ch->ch_rout = ch->ch_rin;
194 goto out;
195 }
196
197 tty_count = tty->count;
198 if (!tty_count) {
199 ch->ch_rout = ch->ch_rin;
200 goto out;
201 }
202
203 if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
204 ch->ch_rout = ch->ch_rin;
205 goto out;
206 }
207
208 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
209
210 /* data_len should be the number of chars that we read in */
211 data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
212
213 /* len is the amount of data we are going to transfer here */
214 len = tty_buffer_request_room(&ch->port, data_len);
215
216 /* Check DPA flow control */
217 if ((nd->nd_dpa_debug) &&
218 (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
219 (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
220 len = 0;
221
222 if ((len) && !(ch->ch_flag & CH_RXSTOP)) {
223
224 dgrp_read_data_block(ch, myflipbuf, len);
225
226 if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
227 parity_scan(ch, myflipbuf, myflipflagbuf, &len);
228 else
229 memset(myflipflagbuf, TTY_NORMAL, len);
230
231 if ((nd->nd_dpa_debug) &&
232 (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
233 dgrp_dpa_data(nd, 1, myflipbuf, len);
234
235 tty_insert_flip_string_flags(tty, myflipbuf,
236 myflipflagbuf, len);
237 tty_flip_buffer_push(tty);
238
239 ch->ch_rxcount += len;
240 }
241
242 /*
243 * Wake up any sleepers (maybe dgrp close) that might be waiting
244 * for a channel flag state change.
245 */
246 wake_up_interruptible(&ch->ch_flag_wait);
247 return;
248
249out:
250 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
251}
252
253
254/*
255 * parity_scan
256 *
257 * Loop to inspect each single character or 0xFF escape.
258 *
259 * if PARMRK & ~DOSMODE:
260 * 0xFF 0xFF Normal 0xFF character, escaped
261 * to eliminate confusion.
262 * 0xFF 0x00 0x00 Break
263 * 0xFF 0x00 CC Error character CC.
264 * CC Normal character CC.
265 *
266 * if PARMRK & DOSMODE:
267 * 0xFF 0x18 0x00 Break
268 * 0xFF 0x08 0x00 Framing Error
269 * 0xFF 0x04 0x00 Parity error
270 * 0xFF 0x0C 0x00 Both Framing and Parity error
271 *
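 * For example (illustrative traces, not taken from the protocol spec):
 * without DOSMODE the incoming bytes
 *     0x41  0xFF 0xFF  0xFF 0x00 0x00  0x42
 * are delivered as 'A' (TTY_NORMAL), 0xFF (TTY_NORMAL), 0x00 (TTY_BREAK)
 * and 'B' (TTY_NORMAL); with DOSMODE, 0xFF 0x18 0x00 becomes a single
 * 0x00 flagged TTY_BREAK.
 *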
272 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
273 * as per protocol
274 */
275static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
276 unsigned char *fbuf, int *len)
277{
278 int l = *len;
279 int count = 0;
280 int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
281 unsigned char *cout; /* character buffer */
282 unsigned char *fout; /* flag buffer */
283 unsigned char *in;
284 unsigned char c;
285
286 in = cbuf;
287 cout = cbuf;
288 fout = fbuf;
289
290 while (l--) {
291 c = *in;
292 in++;
293
294 switch (ch->ch_pscan_state) {
295 default:
296 /* reset to sanity and fall through */
297 ch->ch_pscan_state = 0 ;
298
299 case 0:
300 /* No FF seen yet */
301 if (c == 0xff) /* delete this character from stream */
302 ch->ch_pscan_state = 1;
303 else {
304 *cout++ = c;
305 *fout++ = TTY_NORMAL;
306 count += 1;
307 }
308 break;
309
310 case 1:
311 /* first FF seen */
312 if (c == 0xff) {
313 /* doubled ff, transform to single ff */
314 *cout++ = c;
315 *fout++ = TTY_NORMAL;
316 count += 1;
317 ch->ch_pscan_state = 0;
318 } else {
319 /* save value for examination in next state */
320 ch->ch_pscan_savechar = c;
321 ch->ch_pscan_state = 2;
322 }
323 break;
324
325 case 2:
326 /* third character of ff sequence */
327 *cout++ = c;
328 if (DOS) {
329 if (ch->ch_pscan_savechar & 0x10)
330 *fout++ = TTY_BREAK;
331 else if (ch->ch_pscan_savechar & 0x08)
332 *fout++ = TTY_FRAME;
333 else
334 /*
335 * either marked as a parity error,
336 * indeterminate, or not in DOSMODE;
337 * call it a parity error
338 */
339 *fout++ = TTY_PARITY;
340 } else {
341 /* case FF XX ?? where XX is not 00 */
342 if (ch->ch_pscan_savechar & 0xff) {
343 /* this should not happen */
344 pr_info("%s: parity_scan: error unexpected byte\n",
345 __func__);
346 *fout++ = TTY_PARITY;
347 }
348 /* case FF 00 XX where XX is not 00 */
349 else if (c == 0xff)
350 *fout++ = TTY_PARITY;
351 /* case FF 00 00 */
352 else
353 *fout++ = TTY_BREAK;
354
355 }
356 count += 1;
357 ch->ch_pscan_state = 0;
358 }
359 }
360 *len = count;
361}
362
363
364/**
365 * dgrp_net_idle() -- Idle the network connection
366 * @nd: pointer to node structure to idle
367 */
368static void dgrp_net_idle(struct nd_struct *nd)
369{
370 struct ch_struct *ch;
371 int i;
372
373 nd->nd_tx_work = 1;
374
375 nd->nd_state = NS_IDLE;
376 nd->nd_flag = 0;
377
378 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
379 if (nd->nd_seq_wait[i] != 0) {
380 nd->nd_seq_wait[i] = 0;
381 wake_up_interruptible(&nd->nd_seq_wque[i]);
382 }
383
384 if (i == nd->nd_seq_in)
385 break;
386 }
387
388 nd->nd_seq_out = nd->nd_seq_in;
389
390 nd->nd_unack = 0;
391 nd->nd_remain = 0;
392
393 nd->nd_tx_module = 0x10;
394 nd->nd_rx_module = 0x00;
395
396 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
397 ch->ch_state = CS_IDLE;
398
399 ch->ch_otype = 0;
400 ch->ch_otype_waiting = 0;
401 }
402}
403
404/*
405 * Increase the number of channels, waking up any
406 * threads that might be waiting for the channels
407 * to appear.
408 */
409static void increase_channel_count(struct nd_struct *nd, int n)
410{
411 struct ch_struct *ch;
412 struct device *classp;
413 char name[DEVICE_NAME_SIZE];
414 int ret;
415 u8 *buf;
416 int i;
417
418 for (i = nd->nd_chan_count; i < n; ++i) {
419 ch = nd->nd_chan + i;
420
421 /* FIXME: return a useful error instead! */
422 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
423 if (!buf)
424 return;
425
426 if (ch->ch_tbuf)
427 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
428 __func__);
429
430 ch->ch_tbuf = buf;
431
432 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
433 if (!buf)
434 return;
435
436 if (ch->ch_rbuf)
437 pr_info("%s - ch_rbuf was not NULL\n",
438 __func__);
439 ch->ch_rbuf = buf;
440
441 classp = tty_port_register_device(&ch->port,
442 nd->nd_serial_ttdriver, i,
443 NULL);
444
445 ch->ch_tun.un_sysfs = classp;
446 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
447
448 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
449 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
450 &classp->kobj, name);
451
452 /* NOTE: We don't support "cu" devices anymore,
453 * so you will notice we don't register them
454 * here anymore. */
455 if (dgrp_register_prdevices) {
456 classp = tty_register_device(nd->nd_xprint_ttdriver,
457 i, NULL);
458 ch->ch_pun.un_sysfs = classp;
459 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
460
461 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
462 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
463 &classp->kobj, name);
464 }
465
466 nd->nd_chan_count = i + 1;
467 wake_up_interruptible(&ch->ch_flag_wait);
468 }
469}
470
471/*
472 * Decrease the number of channels, and wake up any threads that might
473 * be waiting on the channels that vanished.
474 */
475static void decrease_channel_count(struct nd_struct *nd, int n)
476{
477 struct ch_struct *ch;
478 char name[DEVICE_NAME_SIZE];
479 int i;
480
481 for (i = nd->nd_chan_count - 1; i >= n; --i) {
482 ch = nd->nd_chan + i;
483
484 /*
485 * Make any open ports inoperative.
486 */
487 ch->ch_state = CS_IDLE;
488
489 ch->ch_otype = 0;
490 ch->ch_otype_waiting = 0;
491
492 /*
493 * Only "HANGUP" if we care about carrier
494 * transitions and we are already open.
495 */
496 if (ch->ch_open_count != 0) {
497 ch->ch_flag |= CH_HANGUP;
498 dgrp_carrier(ch);
499 }
500
501 /*
502 * Unlike the CH_HANGUP flag above, use another
503 * flag to indicate to the RealPort state machine
504 * that this port has disappeared.
505 */
506 if (ch->ch_open_count != 0)
507 ch->ch_flag |= CH_PORT_GONE;
508
509 wake_up_interruptible(&ch->ch_flag_wait);
510
511 nd->nd_chan_count = i;
512
513 kfree(ch->ch_tbuf);
514 ch->ch_tbuf = NULL;
515
516 kfree(ch->ch_rbuf);
517 ch->ch_rbuf = NULL;
518
519 nd->nd_chan_count = i;
520
521 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
522 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
523 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
524 tty_unregister_device(nd->nd_serial_ttdriver, i);
525
526 /*
527 * NOTE: We don't support "cu" devices anymore, so don't
528 * unregister them here anymore.
529 */
530
531 if (dgrp_register_prdevices) {
532 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
533 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
534 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
535 tty_unregister_device(nd->nd_xprint_ttdriver, i);
536 }
537 }
538}
539
540/**
541 * dgrp_chan_count() -- Adjust the node channel count.
542 * @nd: pointer to a node structure
543 * @n: new value for channel count
544 *
545 * Adjusts the node channel count. If new ports have appeared, it tries
546 * to signal those processes that might have been waiting for ports to
547 * appear. If ports have disappeared it tries to signal those processes
548 * that might be hung waiting for a response for the now non-existent port.
549 */
550static void dgrp_chan_count(struct nd_struct *nd, int n)
551{
552 if (n == nd->nd_chan_count)
553 return;
554
555 if (n > nd->nd_chan_count)
556 increase_channel_count(nd, n);
557
558 if (n < nd->nd_chan_count)
559 decrease_channel_count(nd, n);
560}
561
562/**
563 * dgrp_monitor() -- send data to the device monitor queue
564 * @nd: pointer to a node structure
565 * @buf: data to copy to the monitoring buffer
566 * @len: number of bytes to transfer to the buffer
567 *
568 * Called by the net device routines to send data to the device
569 * monitor queue. If the device monitor buffer is too full to
570 * accept the data, it waits until the buffer is ready.
571 */
572static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
573{
574 int n;
575 int r;
576 int rtn;
577
578 /*
579 * Grab monitor lock.
580 */
581 down(&nd->nd_mon_semaphore);
582
583 /*
584 * Loop while data remains.
585 */
586 while ((len > 0) && (nd->nd_mon_buf)) {
587 /*
588 * Determine the amount of available space left in the
589 * buffer. If there's none, wait until some appears.
590 */
591
592 n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;
593
594 if (!n) {
595 nd->nd_mon_flag |= MON_WAIT_SPACE;
596
597 up(&nd->nd_mon_semaphore);
598
599 /*
600 * Go to sleep waiting until the condition becomes true.
601 */
602 rtn = wait_event_interruptible(nd->nd_mon_wqueue,
603 ((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));
604
605/* FIXME: really ignore rtn? */
606
607 /*
608 * We can't exit here if we receive a signal, since
609 * to do so would trash the debug stream.
610 */
611
612 down(&nd->nd_mon_semaphore);
613
614 continue;
615 }
616
617 /*
618 * Copy as much data as will fit.
619 */
620
621 if (n > len)
622 n = len;
623
624 r = MON_MAX - nd->nd_mon_in;
625
626 if (r <= n) {
627 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);
628
629 n -= r;
630
631 nd->nd_mon_in = 0;
632
633 buf += r;
634 len -= r;
635 }
636
637 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);
638
639 nd->nd_mon_in += n;
640
641 buf += n;
642 len -= n;
643
644 if (nd->nd_mon_in >= MON_MAX)
645 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
646 __func__, nd->nd_mon_in);
647
648 /*
649 * Wakeup any thread waiting for data
650 */
651
652 if (nd->nd_mon_flag & MON_WAIT_DATA) {
653 nd->nd_mon_flag &= ~MON_WAIT_DATA;
654 wake_up_interruptible(&nd->nd_mon_wqueue);
655 }
656 }
657
658 /*
659 * Release the monitor lock.
660 */
661 up(&nd->nd_mon_semaphore);
662}
663
664/**
665 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
666 * @nd: pointer to a node structure
667 * @buf: destination buffer
668 *
669 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
670 * open.
671 */
672static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
673{
674 ulong t;
675
676 /*
677 * Convert time in HZ since open to time in milliseconds
678 * since open.
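 *
 * For example, on a (hypothetical) HZ = 250 kernel, t = 375 jiffies
 * becomes 1000 * (375 / 250) + 1000 * (375 % 250) / 250
 *       = 1000 + 500 = 1500 milliseconds.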
679 */
680 t = jiffies - nd->nd_mon_lbolt;
681 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
682
683 put_unaligned_be32((uint)(t & 0xffffffff), buf);
684}
685
686
687
688/**
689 * dgrp_monitor_message() -- Builds a rpdump style message.
690 * @nd: pointer to a node structure
691 * @message: text of the message to log
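 *
 * The resulting record in the monitor stream is, roughly: one
 * RPDUMP_MESSAGE type byte, a 4-byte big-endian timestamp in
 * milliseconds since open (from dgrp_encode_time), a 2-byte big-endian
 * length, and then the message text itself.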
692 */
693static void dgrp_monitor_message(struct nd_struct *nd, char *message)
694{
695 u8 header[7];
696 int n;
697
698 header[0] = RPDUMP_MESSAGE;
699
700 dgrp_encode_time(nd, header + 1);
701
702 n = strlen(message);
703
704 put_unaligned_be16(n, header + 5);
705
706 dgrp_monitor(nd, header, sizeof(header));
707 dgrp_monitor(nd, (u8 *) message, n);
708}
709
710
711
712/**
713 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
714 * @nd: pointer to a node structure
715 */
716static void dgrp_monitor_reset(struct nd_struct *nd)
717{
718 u8 header[5];
719
720 header[0] = RPDUMP_RESET;
721
722 dgrp_encode_time(nd, header + 1);
723
724 dgrp_monitor(nd, header, sizeof(header));
725}
726
727/**
728 * dgrp_monitor_data() -- builds a monitor data packet
729 * @nd: pointer to a node structure
730 * @type: type of message to be logged
731 * @buf: data to be logged
732 * @size: number of bytes in the buffer
733 */
734static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
735{
736 u8 header[7];
737
738 header[0] = type;
739
740 dgrp_encode_time(nd, header + 1);
741
742 put_unaligned_be16(size, header + 5);
743
744 dgrp_monitor(nd, header, sizeof(header));
745 dgrp_monitor(nd, buf, size);
746}
747
748static int alloc_nd_buffers(struct nd_struct *nd)
749{
750
751 nd->nd_iobuf = NULL;
752 nd->nd_writebuf = NULL;
753 nd->nd_inputbuf = NULL;
754 nd->nd_inputflagbuf = NULL;
755
756 /*
757 * Allocate the network read/write buffer.
758 */
759 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
760 if (!nd->nd_iobuf)
761 goto out_err;
762
763 /*
764 * Allocate a buffer for doing the copy from user space to
765 * kernel space in the write routines.
766 */
767 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
768 if (!nd->nd_writebuf)
769 goto out_err;
770
771 /*
772 * Allocate a buffer for doing the copy from kernel space to
773 * tty buffer space in the read routines.
774 */
775 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
776 if (!nd->nd_inputbuf)
777 goto out_err;
778
779 /*
780 * Allocate a buffer for doing the copy from kernel space to
781 * tty buffer space in the read routines.
782 */
783 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
784 if (!nd->nd_inputflagbuf)
785 goto out_err;
786
787 return 0;
788
789out_err:
790 kfree(nd->nd_iobuf);
791 kfree(nd->nd_writebuf);
792 kfree(nd->nd_inputbuf);
793 kfree(nd->nd_inputflagbuf);
794 return -ENOMEM;
795}
796
797/*
798 * dgrp_net_open() -- Open the NET device for a particular PortServer
799 */
800static int dgrp_net_open(struct inode *inode, struct file *file)
801{
802 struct nd_struct *nd;
803 struct proc_dir_entry *de;
804 ulong lock_flags;
805 int rtn;
806
807 rtn = try_module_get(THIS_MODULE);
808 if (!rtn)
809 return -EAGAIN;
810
811 if (!capable(CAP_SYS_ADMIN)) {
812 rtn = -EPERM;
813 goto done;
814 }
815
816 /*
817 * Make sure that the "private_data" field hasn't already been used.
818 */
819 if (file->private_data) {
820 rtn = -EINVAL;
821 goto done;
822 }
823
824 /*
825 * Get the node pointer, and fail if it doesn't exist.
826 */
827 de = PDE(inode);
828 if (!de) {
829 rtn = -ENXIO;
830 goto done;
831 }
832
833 nd = (struct nd_struct *) de->data;
834 if (!nd) {
835 rtn = -ENXIO;
836 goto done;
837 }
838
839 file->private_data = (void *) nd;
840
841 /*
842 * Grab the NET lock.
843 */
844 down(&nd->nd_net_semaphore);
845
846 if (nd->nd_state != NS_CLOSED) {
847 rtn = -EBUSY;
848 goto unlock;
849 }
850
851 /*
852 * Initialize the link speed parameters.
853 */
854
855 nd->nd_link.lk_fast_rate = UIO_MAX;
856 nd->nd_link.lk_slow_rate = UIO_MAX;
857
858 nd->nd_link.lk_fast_delay = 1000;
859 nd->nd_link.lk_slow_delay = 1000;
860
861 nd->nd_link.lk_header_size = 46;
862
863
864 rtn = alloc_nd_buffers(nd);
865 if (rtn)
866 goto unlock;
867
868 /*
869 * The port is now open, so move it to the IDLE state
870 */
871 dgrp_net_idle(nd);
872
873 nd->nd_tx_time = jiffies;
874
875 /*
876 * If the polling routine is not running, start it running here
877 */
878 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
879
880 if (!dgrp_poll_data.node_active_count) {
881 dgrp_poll_data.node_active_count = 2;
882 dgrp_poll_data.timer.expires = jiffies +
883 dgrp_poll_tick * HZ / 1000;
884 add_timer(&dgrp_poll_data.timer);
885 }
886
887 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
888
889 dgrp_monitor_message(nd, "Net Open");
890
891unlock:
892 /*
893 * Release the NET lock.
894 */
895 up(&nd->nd_net_semaphore);
896
897done:
898 if (rtn)
899 module_put(THIS_MODULE);
900
901 return rtn;
902}
903
904/* dgrp_net_release() -- close the NET device for a particular PortServer */
905static int dgrp_net_release(struct inode *inode, struct file *file)
906{
907 struct nd_struct *nd;
908 ulong lock_flags;
909
910 nd = (struct nd_struct *)(file->private_data);
911 if (!nd)
912 goto done;
913
914/* TODO : historical locking placeholder */
915/*
916 * In the HPUX version of the RealPort driver (which served as a basis
917 * for this driver) this locking code was used. Saved if ever we need
918 * to review the locking under Linux.
919 */
920/* spinlock(&nd->nd_lock); */
921
922
923 /*
924 * Grab the NET lock.
925 */
926 down(&nd->nd_net_semaphore);
927
928 /*
929 * Before "closing" the internal connection, make sure all
930 * ports are "idle".
931 */
932 dgrp_net_idle(nd);
933
934 nd->nd_state = NS_CLOSED;
935 nd->nd_flag = 0;
936
937 /*
938 * TODO ... must the wait queue be reset on close?
939 * should any pending waiters be reset?
940 * Let's decide to assert that the waitq is empty... and see
941 * how soon we break.
942 */
943 if (waitqueue_active(&nd->nd_tx_waitq))
944 pr_info("%s - expected waitqueue_active to be false\n",
945 __func__);
946
947 nd->nd_send = 0;
948
949 kfree(nd->nd_iobuf);
950 nd->nd_iobuf = NULL;
951
952/* TODO : historical locking placeholder */
953/*
954 * In the HPUX version of the RealPort driver (which served as a basis
955 * for this driver) this locking code was used. Saved if ever we need
956 * to review the locking under Linux.
957 */
958/* spinunlock( &nd->nd_lock ); */
959
960
961 kfree(nd->nd_writebuf);
962 nd->nd_writebuf = NULL;
963
964 kfree(nd->nd_inputbuf);
965 nd->nd_inputbuf = NULL;
966
967 kfree(nd->nd_inputflagbuf);
968 nd->nd_inputflagbuf = NULL;
969
970/* TODO : historical locking placeholder */
971/*
972 * In the HPUX version of the RealPort driver (which served as a basis
973 * for this driver) this locking code was used. Saved if ever we need
974 * to review the locking under Linux.
975 */
976/* spinlock(&nd->nd_lock); */
977
978 /*
979 * Set the active port count to zero.
980 */
981 dgrp_chan_count(nd, 0);
982
983/* TODO : historical locking placeholder */
984/*
985 * In the HPUX version of the RealPort driver (which served as a basis
986 * for this driver) this locking code was used. Saved if ever we need
987 * to review the locking under Linux.
988 */
989/* spinunlock(&nd->nd_lock); */
990
991 /*
992 * Release the NET lock.
993 */
994 up(&nd->nd_net_semaphore);
995
996 /*
997 * Cause the poller to stop scheduling itself if this is
998 * the last active node.
999 */
1000 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
1001
1002 if (dgrp_poll_data.node_active_count == 2) {
1003 del_timer(&dgrp_poll_data.timer);
1004 dgrp_poll_data.node_active_count = 0;
1005 }
1006
1007 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
1008
1009 down(&nd->nd_net_semaphore);
1010
1011 dgrp_monitor_message(nd, "Net Close");
1012
1013 up(&nd->nd_net_semaphore);
1014
1015done:
1016 module_put(THIS_MODULE);
1017 file->private_data = NULL;
1018 return 0;
1019}
1020
1021/* used in dgrp_send to setup command header */
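/*
 * For example (illustrative values): set_cmd_header(b, 3, 16) emits the
 * two bytes 0xb3 0x10 -- command 16, the "Send Status Request" used in
 * dgrp_send() below, addressed to port 3 of the currently selected
 * module -- and returns b + 2.
 */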
1022static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1023{
1024 *b++ = 0xb0 + (port & 0x0f);
1025 *b++ = cmd;
1026 return b;
1027}
1028
1029/**
1030 * dgrp_send() -- build a packet for transmission to the server
1031 * @nd: pointer to a node structure
1032 * @tmax: maximum bytes to transmit
1033 *
1034 * returns number of bytes sent
1035 */
1036static int dgrp_send(struct nd_struct *nd, long tmax)
1037{
1038 struct ch_struct *ch = nd->nd_chan;
1039 u8 *b;
1040 u8 *buf;
1041 u8 *mbuf;
1042 u8 port;
1043 int mod;
1044 long send;
1045 int maxport;
1046 long lastport = -1;
1047 ushort rwin;
1048 long in;
1049 ushort n;
1050 long t;
1051 long ttotal;
1052 long tchan;
1053 long tsend;
1054 ushort tsafe;
1055 long work;
1056 long send_sync;
1057 long wanted_sync_port = -1;
1058 ushort tdata[CHAN_MAX];
1059 long used_buffer;
1060
1061 mbuf = nd->nd_iobuf + UIO_BASE;
1062 buf = b = mbuf;
1063
1064 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1065
1066 ttotal = 0;
1067 tchan = 0;
1068
1069 memset(tdata, 0, sizeof(tdata));
1070
1071
1072 /*
1073 * If there are any outstanding requests to be serviced,
1074 * service them here.
1075 */
1076 if (nd->nd_send & NR_PASSWORD) {
1077
1078 /*
1079 * Send Password response.
1080 */
1081
1082 b[0] = 0xfc;
1083 b[1] = 0x20;
1084 put_unaligned_be16(strlen(nd->password), b + 2);
1085 b += 4;
1086 b += strlen(nd->password);
1087 nd->nd_send &= ~(NR_PASSWORD);
1088 }
1089
1090
1091 /*
1092 * Loop over all modules to generate commands, and determine
1093 * the amount of data queued for transmit.
1094 */
1095
1096 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1097 /*
1098 * If this is not the current module, enter a module select
1099 * code in the buffer.
1100 */
1101
1102 if (mod != nd->nd_tx_module)
1103 mbuf = ++b;
1104
1105 /*
1106 * Loop to process one module.
1107 */
1108
1109 maxport = port + 16;
1110
1111 if (maxport > nd->nd_chan_count)
1112 maxport = nd->nd_chan_count;
1113
1114 for (; port < maxport; port++, ch++) {
1115 /*
1116 * Switch based on channel state.
1117 */
1118
1119 switch (ch->ch_state) {
1120 /*
1121 * Send requests when the port is closed, and there
1122 * are no Open, Close or Cancel requests expected.
1123 */
1124
1125 case CS_IDLE:
1126 /*
1127 * Wait until any open error code
1128 * has been delivered to all
1129 * associated ports.
1130 */
1131
1132 if (ch->ch_open_error) {
1133 if (ch->ch_wait_count[ch->ch_otype]) {
1134 work = 1;
1135 break;
1136 }
1137
1138 ch->ch_open_error = 0;
1139 }
1140
1141 /*
1142 * Wait until the channel HANGUP flag is reset
1143 * before sending the first open. We can only
1144 * get to this state after a server disconnect.
1145 */
1146
1147 if ((ch->ch_flag & CH_HANGUP) != 0)
1148 break;
1149
1150 /*
1151 * If recovering from a TCP disconnect, or if
1152 * there is an immediate open pending, send an
1153 * Immediate Open request.
1154 */
1155 if ((ch->ch_flag & CH_PORT_GONE) ||
1156 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1157 b = set_cmd_header(b, port, 10);
1158 *b++ = 0;
1159
1160 ch->ch_state = CS_WAIT_OPEN;
1161 ch->ch_otype = OTYPE_IMMEDIATE;
1162 break;
1163 }
1164
1165 /*
1166 * If there is no Persistent or Incoming Open on the wait
1167 * list in the server, and a thread is waiting for a
1168 * Persistent or Incoming Open, send a Persistent or Incoming
1169 * Open Request.
1170 */
1171 if (ch->ch_otype_waiting == 0) {
1172 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1173 b = set_cmd_header(b, port, 10);
1174 *b++ = 1;
1175
1176 ch->ch_state = CS_WAIT_OPEN;
1177 ch->ch_otype = OTYPE_PERSISTENT;
1178 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1179 b = set_cmd_header(b, port, 10);
1180 *b++ = 2;
1181
1182 ch->ch_state = CS_WAIT_OPEN;
1183 ch->ch_otype = OTYPE_INCOMING;
1184 }
1185 break;
1186 }
1187
1188 /*
1189 * If a Persistent or Incoming Open is pending in
1190 * the server, but there is no longer an open
1191 * thread waiting for it, cancel the request.
1192 */
1193
1194 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1195 b = set_cmd_header(b, port, 10);
1196 *b++ = 4;
1197
1198 ch->ch_state = CS_WAIT_CANCEL;
1199 ch->ch_otype = ch->ch_otype_waiting;
1200 }
1201 break;
1202
1203 /*
1204 * Send port parameter queries.
1205 */
1206 case CS_SEND_QUERY:
1207 /*
1208 * Clear out all FEP state that might remain
1209 * from the last connection.
1210 */
1211
1212 ch->ch_flag |= CH_PARAM;
1213
1214 ch->ch_flag &= ~CH_RX_FLUSH;
1215
1216 ch->ch_expect = 0;
1217
1218 ch->ch_s_tin = 0;
1219 ch->ch_s_tpos = 0;
1220 ch->ch_s_tsize = 0;
1221 ch->ch_s_treq = 0;
1222 ch->ch_s_elast = 0;
1223
1224 ch->ch_s_rin = 0;
1225 ch->ch_s_rwin = 0;
1226 ch->ch_s_rsize = 0;
1227
1228 ch->ch_s_tmax = 0;
1229 ch->ch_s_ttime = 0;
1230 ch->ch_s_rmax = 0;
1231 ch->ch_s_rtime = 0;
1232 ch->ch_s_rlow = 0;
1233 ch->ch_s_rhigh = 0;
1234
1235 ch->ch_s_brate = 0;
1236 ch->ch_s_iflag = 0;
1237 ch->ch_s_cflag = 0;
1238 ch->ch_s_oflag = 0;
1239 ch->ch_s_xflag = 0;
1240
1241 ch->ch_s_mout = 0;
1242 ch->ch_s_mflow = 0;
1243 ch->ch_s_mctrl = 0;
1244 ch->ch_s_xon = 0;
1245 ch->ch_s_xoff = 0;
1246 ch->ch_s_lnext = 0;
1247 ch->ch_s_xxon = 0;
1248 ch->ch_s_xxoff = 0;
1249
1250 /* Send Sequence Request */
1251 b = set_cmd_header(b, port, 14);
1252
1253 /* Configure Event Conditions Packet */
1254 b = set_cmd_header(b, port, 42);
1255 put_unaligned_be16(0x02c0, b);
1256 b += 2;
1257 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1258 DM_DSR | DM_RI | DM_CD);
1259
1260 /* Send Status Request */
1261 b = set_cmd_header(b, port, 16);
1262
1263 /* Send Buffer Request */
1264 b = set_cmd_header(b, port, 20);
1265
1266 /* Send Port Capability Request */
1267 b = set_cmd_header(b, port, 22);
1268
1269 ch->ch_expect = (RR_SEQUENCE |
1270 RR_STATUS |
1271 RR_BUFFER |
1272 RR_CAPABILITY);
1273
1274 ch->ch_state = CS_WAIT_QUERY;
1275
1276 /* Raise modem signals */
1277 b = set_cmd_header(b, port, 44);
1278
1279 if (ch->ch_flag & CH_PORT_GONE)
1280 ch->ch_s_mout = ch->ch_mout;
1281 else
1282 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1283
1284 *b++ = ch->ch_mout;
1285 *b++ = ch->ch_s_mflow = 0;
1286 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1287
1288 if (ch->ch_flag & CH_PORT_GONE)
1289 ch->ch_flag &= ~CH_PORT_GONE;
1290
1291 break;
1292
1293 /*
1294 * Handle normal open and ready mode.
1295 */
1296
1297 case CS_READY:
1298
1299 /*
1300 * If the port is not open, and there are
1301 * no longer any ports requesting an open,
1302 * then close the port.
1303 */
1304
1305 if (ch->ch_open_count == 0 &&
1306 ch->ch_wait_count[ch->ch_otype] == 0) {
1307 goto send_close;
1308 }
1309
1310 /*
1311 * Process waiting input.
1312 *
1313 * If there is no one to read it, discard the data.
1314 *
1315 * Otherwise if we are not in fastcook mode, or if there is a
1316 * fastcook thread waiting for data, send the data to the
1317 * line discipline.
1318 */
1319 if (ch->ch_rin != ch->ch_rout) {
1320 if (ch->ch_tun.un_open_count == 0 ||
1321 (ch->ch_tun.un_flag & UN_CLOSING) ||
1322 (ch->ch_cflag & CF_CREAD) == 0) {
1323 ch->ch_rout = ch->ch_rin;
1324 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1325 ch->ch_inwait != 0) {
1326 dgrp_input(ch);
1327
1328 if (ch->ch_rin != ch->ch_rout)
1329 work = 1;
1330 }
1331 }
1332
1333 /*
1334 * Handle receive flush, and changes to
1335 * server port parameters.
1336 */
1337
1338 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1339 /*
1340 * If we are in receive flush mode,
1341 * and enough data has gone by, reset
1342 * receive flush mode.
1343 */
1344 if (ch->ch_flag & CH_RX_FLUSH) {
1345 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1346 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1347 ch->ch_flag &= ~CH_RX_FLUSH;
1348 else
1349 work = 1;
1350 }
1351
1352 /*
1353 * Send TMAX, TTIME.
1354 */
1355
1356 if (ch->ch_s_tmax != ch->ch_tmax ||
1357 ch->ch_s_ttime != ch->ch_ttime) {
1358 b = set_cmd_header(b, port, 48);
1359
1360 ch->ch_s_tmax = ch->ch_tmax;
1361 ch->ch_s_ttime = ch->ch_ttime;
1362
1363 put_unaligned_be16(ch->ch_s_tmax,
1364 b);
1365 b += 2;
1366
1367 put_unaligned_be16(ch->ch_s_ttime,
1368 b);
1369 b += 2;
1370 }
1371
1372 /*
1373 * Send RLOW, RHIGH.
1374 */
1375
1376 if (ch->ch_s_rlow != ch->ch_rlow ||
1377 ch->ch_s_rhigh != ch->ch_rhigh) {
1378 b = set_cmd_header(b, port, 45);
1379
1380 ch->ch_s_rlow = ch->ch_rlow;
1381 ch->ch_s_rhigh = ch->ch_rhigh;
1382
1383 put_unaligned_be16(ch->ch_s_rlow,
1384 b);
1385 b += 2;
1386
1387 put_unaligned_be16(ch->ch_s_rhigh,
1388 b);
1389 b += 2;
1390 }
1391
1392 /*
1393 * Send BRATE, CFLAG, IFLAG,
1394 * OFLAG, XFLAG.
1395 */
1396
1397 if (ch->ch_s_brate != ch->ch_brate ||
1398 ch->ch_s_cflag != ch->ch_cflag ||
1399 ch->ch_s_iflag != ch->ch_iflag ||
1400 ch->ch_s_oflag != ch->ch_oflag ||
1401 ch->ch_s_xflag != ch->ch_xflag) {
1402 b = set_cmd_header(b, port, 40);
1403
1404 ch->ch_s_brate = ch->ch_brate;
1405 ch->ch_s_cflag = ch->ch_cflag;
1406 ch->ch_s_iflag = ch->ch_iflag;
1407 ch->ch_s_oflag = ch->ch_oflag;
1408 ch->ch_s_xflag = ch->ch_xflag;
1409
1410 put_unaligned_be16(ch->ch_s_brate,
1411 b);
1412 b += 2;
1413
1414 put_unaligned_be16(ch->ch_s_cflag,
1415 b);
1416 b += 2;
1417
1418 put_unaligned_be16(ch->ch_s_iflag,
1419 b);
1420 b += 2;
1421
1422 put_unaligned_be16(ch->ch_s_oflag,
1423 b);
1424 b += 2;
1425
1426 put_unaligned_be16(ch->ch_s_xflag,
1427 b);
1428 b += 2;
1429 }
1430
1431 /*
1432 * Send MOUT, MFLOW, MCTRL.
1433 */
1434
1435 if (ch->ch_s_mout != ch->ch_mout ||
1436 ch->ch_s_mflow != ch->ch_mflow ||
1437 ch->ch_s_mctrl != ch->ch_mctrl) {
1438 b = set_cmd_header(b, port, 44);
1439
1440 *b++ = ch->ch_s_mout = ch->ch_mout;
1441 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1442 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1443 }
1444
1445 /*
1446 * Send Flow control characters.
1447 */
1448
1449 if (ch->ch_s_xon != ch->ch_xon ||
1450 ch->ch_s_xoff != ch->ch_xoff ||
1451 ch->ch_s_lnext != ch->ch_lnext ||
1452 ch->ch_s_xxon != ch->ch_xxon ||
1453 ch->ch_s_xxoff != ch->ch_xxoff) {
1454 b = set_cmd_header(b, port, 46);
1455
1456 *b++ = ch->ch_s_xon = ch->ch_xon;
1457 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1458 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1459 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1460 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1461 }
1462
1463 /*
1464 * Send RMAX, RTIME.
1465 */
1466
1467 if (ch->ch_s_rmax != ch->ch_rmax ||
1468 ch->ch_s_rtime != ch->ch_rtime) {
1469 b = set_cmd_header(b, port, 47);
1470
1471 ch->ch_s_rmax = ch->ch_rmax;
1472 ch->ch_s_rtime = ch->ch_rtime;
1473
1474 put_unaligned_be16(ch->ch_s_rmax,
1475 b);
1476 b += 2;
1477
1478 put_unaligned_be16(ch->ch_s_rtime,
1479 b);
1480 b += 2;
1481 }
1482
1483 ch->ch_flag &= ~CH_PARAM;
1484 wake_up_interruptible(&ch->ch_flag_wait);
1485 }
1486
1487
1488 /*
1489 * Handle action commands.
1490 */
1491
1492 if (ch->ch_send != 0) {
1493 /* int send = ch->ch_send & ~ch->ch_expect; */
1494 send = ch->ch_send & ~ch->ch_expect;
1495
1496 /* Send character immediate */
1497 if ((send & RR_TX_ICHAR) != 0) {
1498 b = set_cmd_header(b, port, 60);
1499
1500 *b++ = ch->ch_xon;
1501 ch->ch_expect |= RR_TX_ICHAR;
1502 }
1503
1504 /* BREAK request */
1505 if ((send & RR_TX_BREAK) != 0) {
1506 if (ch->ch_break_time != 0) {
1507 b = set_cmd_header(b, port, 61);
1508 put_unaligned_be16(ch->ch_break_time,
1509 b);
1510 b += 2;
1511
1512 ch->ch_expect |= RR_TX_BREAK;
1513 ch->ch_break_time = 0;
1514 } else {
1515 ch->ch_send &= ~RR_TX_BREAK;
1516 ch->ch_flag &= ~CH_TX_BREAK;
1517 wake_up_interruptible(&ch->ch_flag_wait);
1518 }
1519 }
1520
1521 /*
1522 * Flush input/output buffers.
1523 */
1524
1525 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1526 b = set_cmd_header(b, port, 62);
1527
1528 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1529 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1530
1531 if (send & RR_RX_FLUSH) {
1532 ch->ch_flush_seq = nd->nd_seq_in;
1533 ch->ch_flag |= CH_RX_FLUSH;
1534 work = 1;
1535 send_sync = 1;
1536 wanted_sync_port = port;
1537 }
1538
1539 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1540 }
1541
1542 /* Pause input/output */
1543 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1544 b = set_cmd_header(b, port, 63);
1545 *b = 0;
1546
1547 if ((send & RR_TX_STOP) != 0)
1548 *b |= EV_OPU;
1549
1550 if ((send & RR_RX_STOP) != 0)
1551 *b |= EV_IPU;
1552
1553 b++;
1554
1555 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1556 }
1557
1558 /* Start input/output */
1559 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1560 b = set_cmd_header(b, port, 64);
1561 *b = 0;
1562
1563 if ((send & RR_TX_START) != 0)
1564 *b |= EV_OPU | EV_OPS | EV_OPX;
1565
1566 if ((send & RR_RX_START) != 0)
1567 *b |= EV_IPU | EV_IPS;
1568
1569 b++;
1570
1571 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1572 }
1573 }
1574
1575
1576 /*
1577 * Send a window sequence to acknowledge received data.
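 *
 * The acknowledgement below is a 3-byte packet: 0xa0 plus the low port
 * nibble, followed by the 16-bit window position (rwin).  To limit
 * traffic it is only sent once the window has advanced by at least
 * RBUF_MAX/4 bytes since the last report.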
1578 */
1579
1580 rwin = (ch->ch_s_rin +
1581 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1582
1583 n = (rwin - ch->ch_s_rwin) & 0xffff;
1584
1585 if (n >= RBUF_MAX / 4) {
1586 b[0] = 0xa0 + (port & 0xf);
1587 ch->ch_s_rwin = rwin;
1588 put_unaligned_be16(rwin, b + 1);
1589 b += 3;
1590 }
1591
1592 /*
1593 * If the terminal is waiting on LOW
1594 * water or EMPTY, and the condition
1595 * is now satisfied, call the line
1596 * discipline to put more data in the
1597 * buffer.
1598 */
1599
1600 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1601
1602 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1603 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1604 (n <= TBUF_LOW) :
1605 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1606 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1607
1608 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1609 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1610 tty_wakeup(ch->ch_tun.un_tty);
1611 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1612 }
1613 }
1614
1615 /*
1616 * If the printer is waiting on LOW
1617 * water, TIME, EMPTY or PWAIT, and is
1618 * now ready to put more data in the
1619 * buffer, call the line discipline to
1620 * do the job.
1621 */
1622
1623 /* FIXME: jiffies - ch->ch_waketime can never
1624 be < 0. Someone needs to work out what is
1625 actually intended here */
1626 if (ch->ch_pun.un_open_count &&
1627 (ch->ch_pun.un_flag &
1628 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1629
1630 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1631 (n <= TBUF_LOW) :
1632 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1633 ((jiffies - ch->ch_waketime) >= 0) :
1634 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1635 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1636 ((ch->ch_tun.un_open_count &&
1637 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1638 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1639 : 1
1640 )
1641 )) {
1642 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1643
1644 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1645 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1646 tty_wakeup(ch->ch_pun.un_tty);
1647 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1648
1649 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1650 work = 1;
1651 }
1652 }
1653
1654
1655 /*
1656 * Determine the max number of bytes
1657 * this port can send, including
1658 * packet header overhead.
1659 */
1660
1661 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1662
1663 if (n > t)
1664 n = t;
1665
1666 if (n != 0) {
1667 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1668
1669 tdata[tchan++] = n;
1670 ttotal += n;
1671 }
1672 break;
1673
1674 /*
1675 * Close the port.
1676 */
1677
1678send_close:
1679 case CS_SEND_CLOSE:
1680 b = set_cmd_header(b, port, 10);
1681 if (ch->ch_otype == OTYPE_IMMEDIATE)
1682 *b++ = 3;
1683 else
1684 *b++ = 4;
1685
1686 ch->ch_state = CS_WAIT_CLOSE;
1687 break;
1688
1689 /*
1690 * Wait for a previous server request.
1691 */
1692
1693 case CS_WAIT_OPEN:
1694 case CS_WAIT_CANCEL:
1695 case CS_WAIT_FAIL:
1696 case CS_WAIT_QUERY:
1697 case CS_WAIT_CLOSE:
1698 break;
1699
1700 default:
1701 pr_info("%s - unexpected channel state (%i)\n",
1702 __func__, ch->ch_state);
1703 }
1704 }
1705
1706 /*
1707 * If a module select code is needed, drop one in. If space
1708 * was reserved for one, but none is needed, recover the space.
1709 */
1710
1711 if (mod != nd->nd_tx_module) {
1712 if (b != mbuf) {
1713 mbuf[-1] = 0xf0 | mod;
1714 nd->nd_tx_module = mod;
1715 } else {
1716 b--;
1717 }
1718 }
1719 }
1720
1721 /*
1722 * Adjust "tmax" so that under worst case conditions we do
1723 * not overflow either the daemon buffer or the internal
1724 * buffer in the loop that follows. Leave a safe area
1725 * of 64 bytes so we start getting asserts before we start
1726 * losing data or clobbering memory.
1727 */
1728
1729 n = UIO_MAX - UIO_BASE;
1730
1731 if (tmax > n)
1732 tmax = n;
1733
1734 tmax -= 64;
1735
1736 tsafe = tmax;
1737
1738 /*
1739 * Allocate space for 5 Module Selects, 1 Sequence Request,
1740 * and 1 Set TREQ for each active channel.
1741 */
1742
1743 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1744
1745 /*
1746 * Further reduce "tmax" to the available transmit credit.
1747 * Note that this is a soft constraint; The transmit credit
1748 * can go negative for a time and then recover.
1749 */
1750
1751 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1752
1753 if (tmax > n)
1754 tmax = n;
1755
1756 /*
1757 * Finally reduce tmax by the number of bytes already in
1758 * the buffer.
1759 */
1760
1761 tmax -= b - buf;
1762
1763 /*
1764 * Suspend data transmit unless every ready channel can send
1765 * at least 1 character.
1766 */
1767 if (tmax < 2 * nd->nd_chan_count) {
1768 tsend = 1;
1769
1770 } else if (tchan > 1 && ttotal > tmax) {
1771
1772 /*
1773 * If transmit is limited by the credit budget, find the
1774 * largest number of characters we can send without driving
1775 * the credit negative.
1776 */
1777
1778 long tm = tmax;
1779 int tc = tchan;
1780 int try;
1781
1782 tsend = tm / tc;
1783
1784 for (try = 0; try < 3; try++) {
1785 int i;
1786 int c = 0;
1787
1788 for (i = 0; i < tc; i++) {
1789 if (tsend < tdata[i])
1790 tdata[c++] = tdata[i];
1791 else
1792 tm -= tdata[i];
1793 }
1794
1795 if (c == tc)
1796 break;
1797
1798 tsend = tm / c;
1799
1800 if (c == 1)
1801 break;
1802
1803 tc = c;
1804 }
1805
1806 tsend = tm / nd->nd_chan_count;
1807
1808 if (tsend < 2)
1809 tsend = 1;
1810
1811 } else {
1812 /*
1813 * If no budgetary constraints, or only one channel ready
1814 * to send, set the character limit to the remaining
1815 * buffer size.
1816 */
1817
1818 tsend = tmax;
1819 }
1820
1821 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
1822
1823 /*
1824 * Loop over all channels, sending queued data.
1825 */
1826
1827 port = 0;
1828 ch = nd->nd_chan;
1829 used_buffer = tmax;
1830
1831 for (mod = 0; port < nd->nd_chan_count; mod++) {
1832 /*
1833 * If this is not the current module, enter a module select
1834 * code in the buffer.
1835 */
1836
1837 if (mod != nd->nd_tx_module)
1838 mbuf = ++b;
1839
1840 /*
1841 * Loop to process one module.
1842 */
1843
1844 maxport = port + 16;
1845
1846 if (maxport > nd->nd_chan_count)
1847 maxport = nd->nd_chan_count;
1848
1849 for (; port < maxport; port++, ch++) {
1850 if (ch->ch_state != CS_READY)
1851 continue;
1852
1853 lastport = port;
1854
1855 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1856
1857 /*
1858 * If there is data that can be sent, send it.
1859 */
1860
1861 if (n != 0 && used_buffer > 0) {
1862 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1863
1864 if (n > t)
1865 n = t;
1866
1867 if (n > tsend) {
1868 work = 1;
1869 n = tsend;
1870 }
1871
1872 if (n > used_buffer) {
1873 work = 1;
1874 n = used_buffer;
1875 }
1876
1877 if (n <= 0)
1878 continue;
1879
1880 /*
1881 * Create the correct size transmit header,
1882 * depending on the amount of data to transmit.
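 *
 * For example (illustrative values): n = 5 on port 2 yields the single
 * header byte 0x42; n = 100 yields 0x82 0x64; n = 600 yields
 * 0x92 0x02 0x58.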
1883 */
1884
1885 if (n <= 8) {
1886
1887 b[0] = ((n - 1) << 4) + (port & 0xf);
1888 b += 1;
1889
1890 } else if (n <= 255) {
1891
1892 b[0] = 0x80 + (port & 0xf);
1893 b[1] = n;
1894 b += 2;
1895
1896 } else {
1897
1898 b[0] = 0x90 + (port & 0xf);
1899 put_unaligned_be16(n, b + 1);
1900 b += 3;
1901 }
1902
1903 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1904
1905 /*
1906 * Copy transmit data to the packet.
1907 */
1908
1909 t = TBUF_MAX - ch->ch_tout;
1910
1911 if (n >= t) {
1912 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1913 b += t;
1914 n -= t;
1915 used_buffer -= t;
1916 ch->ch_tout = 0;
1917 }
1918
1919 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1920 b += n;
1921 used_buffer -= n;
1922 ch->ch_tout += n;
1923 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1924 }
1925
1926 /*
1927 * Wake any terminal unit process waiting in the
1928 * dgrp_write routine for low water.
1929 */
1930
1931 if (n > TBUF_LOW)
1932 continue;
1933
1934 if ((ch->ch_flag & CH_LOW) != 0) {
1935 ch->ch_flag &= ~CH_LOW;
1936 wake_up_interruptible(&ch->ch_flag_wait);
1937 }
1938
1939 /* selwakeup tty_sel */
1940 if (ch->ch_tun.un_open_count) {
1941 struct tty_struct *tty = (ch->ch_tun.un_tty);
1942
1943 if (waitqueue_active(&tty->write_wait))
1944 wake_up_interruptible(&tty->write_wait);
1945
1946 tty_wakeup(tty);
1947 }
1948
1949 if (ch->ch_pun.un_open_count) {
1950 struct tty_struct *tty = (ch->ch_pun.un_tty);
1951
1952 if (waitqueue_active(&tty->write_wait))
1953 wake_up_interruptible(&tty->write_wait);
1954
1955 tty_wakeup(tty);
1956 }
1957
1958 /*
1959 * Do EMPTY processing.
1960 */
1961
1962 if (n != 0)
1963 continue;
1964
1965 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
1966 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
1967 /*
1968 * If there is still data in the server, ask the server
1969 * to notify us when it's all gone.
1970 */
1971
1972 if (ch->ch_s_treq != ch->ch_s_tin) {
1973 b = set_cmd_header(b, port, 43);
1974
1975 ch->ch_s_treq = ch->ch_s_tin;
1976 put_unaligned_be16(ch->ch_s_treq,
1977 b);
1978 b += 2;
1979 }
1980
1981 /*
1982 * If there is a thread waiting for buffer empty,
1983 * and we are truly empty, wake the thread.
1984 */
1985
1986 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
1987 (ch->ch_send & RR_TX_BREAK) == 0) {
1988 ch->ch_flag &= ~CH_EMPTY;
1989
1990 wake_up_interruptible(&ch->ch_flag_wait);
1991 }
1992 }
1993 }
1994
1995 /*
1996 * If a module select code is needed, drop one in. If space
1997 * was reserved for one, but none is needed, recover the space.
1998 */
1999
2000 if (mod != nd->nd_tx_module) {
2001 if (b != mbuf) {
2002 mbuf[-1] = 0xf0 | mod;
2003 nd->nd_tx_module = mod;
2004 } else {
2005 b--;
2006 }
2007 }
2008 }
2009
2010 /*
2011 * Send a synchronization sequence associated with the last open
2012 * channel that sent data, and remember the time when the data was
2013 * sent.
2014 */
2015
2016 in = nd->nd_seq_in;
2017
2018 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
2019 u8 *bb = b;
2020
2021 /*
2022 * Attempt to use the port that really wanted the sync.
2023 * This gets around a race condition where the "lastport" is in
2024 * the middle of the close() routine, and by the time we
2025 * send this command, it will have already acked the close, and
2026 * thus not send the sync response.
2027 */
2028 if (wanted_sync_port >= 0)
2029 lastport = wanted_sync_port;
2030 /*
2031 * Set a flag just in case the port is in the middle of a close;
2032 * it will not be permitted to actually close until we get a
2033 * sync response, which clears the flag.
2034 */
2035 ch = nd->nd_chan + lastport;
2036 ch->ch_flag |= CH_WAITING_SYNC;
2037
2038 mod = lastport >> 4;
2039
2040 if (mod != nd->nd_tx_module) {
2041 bb[0] = 0xf0 + mod;
2042 bb += 1;
2043
2044 nd->nd_tx_module = mod;
2045 }
2046
2047 bb = set_cmd_header(bb, lastport, 12);
2048 *bb++ = in;
2049
2050 nd->nd_seq_size[in] = bb - buf;
2051 nd->nd_seq_time[in] = jiffies;
2052
2053 if (++in >= SEQ_MAX)
2054 in = 0;
2055
2056 if (in != nd->nd_seq_out) {
2057 b = bb;
2058 nd->nd_seq_in = in;
2059 nd->nd_unack += b - buf;
2060 }
2061 }
2062
2063 /*
2064 * If there are no open ports, a sync cannot be sent.
2065 * There is nothing left to wait for anyway, so wake any
2066 * thread waiting for an acknowledgement.
2067 */
2068
2069 else if (nd->nd_seq_wait[in] != 0) {
2070 nd->nd_seq_wait[in] = 0;
2071
2072 wake_up_interruptible(&nd->nd_seq_wque[in]);
2073 }
2074
2075 /*
2076 * If there is no traffic for an interval of IDLE_MAX, then
2077 * send a single byte packet.
2078 */
2079
2080 if (b != buf) {
2081 nd->nd_tx_time = jiffies;
2082 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2083 *b++ = 0xf0 | nd->nd_tx_module;
2084 nd->nd_tx_time = jiffies;
2085 }
2086
2087 n = b - buf;
2088
2089 if (n >= tsafe)
2090 pr_info("%s - n(%i) >= tsafe(%i)\n",
2091 __func__, n, tsafe);
2092
2093 if (tsend < 0)
2094 dgrp_dump(buf, n);
2095
2096 nd->nd_tx_work = work;
2097
2098 return n;
2099}
2100
2101/*
2102 * dgrp_net_read()
2103 * Data to be sent TO the PortServer from the "async." half of the driver.
2104 */
2105static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2106 loff_t *ppos)
2107{
2108 struct nd_struct *nd;
2109 long n;
2110 u8 *local_buf;
2111 u8 *b;
2112 ssize_t rtn;
2113
2114 /*
2115 * Get the node pointer, and quit if it doesn't exist.
2116 */
2117 nd = (struct nd_struct *)(file->private_data);
2118 if (!nd)
2119 return -ENXIO;
2120
2121 if (count < UIO_MIN)
2122 return -EINVAL;
2123
2124 /*
2125 * Only one read/write operation may be in progress at
2126 * any given time.
2127 */
2128
2129 /*
2130 * Grab the NET lock.
2131 */
2132 down(&nd->nd_net_semaphore);
2133
2134 nd->nd_read_count++;
2135
2136 nd->nd_tx_ready = 0;
2137
2138 /*
2139 * Determine the effective size of the buffer.
2140 */
2141
2142 if (nd->nd_remain > UIO_BASE)
2143 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2144 __func__, nd->nd_remain);
2145
2146 b = local_buf = nd->nd_iobuf + UIO_BASE;
2147
2148 /*
2149 * Generate data according to the node state.
2150 */
2151
2152 switch (nd->nd_state) {
2153 /*
2154 * Initialize the connection.
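 *
 * A freshly idled connection emits the six query bytes
 * 0xfb 0x01, 0xfb 0x02, 0xfb 0x18 (product ID, capability and
 * VPD requests) and then waits in NS_WAIT_QUERY.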
2155 */
2156
2157 case NS_IDLE:
2158 if (nd->nd_mon_buf)
2159 dgrp_monitor_reset(nd);
2160
2161 /*
2162 * Request a Product ID Packet.
2163 */
2164
2165 b[0] = 0xfb;
2166 b[1] = 0x01;
2167 b += 2;
2168
2169 nd->nd_expect |= NR_IDENT;
2170
2171 /*
2172 * Request a Server Capability ID Response.
2173 */
2174
2175 b[0] = 0xfb;
2176 b[1] = 0x02;
2177 b += 2;
2178
2179 nd->nd_expect |= NR_CAPABILITY;
2180
2181 /*
2182 * Request a Server VPD Response.
2183 */
2184
2185 b[0] = 0xfb;
2186 b[1] = 0x18;
2187 b += 2;
2188
2189 nd->nd_expect |= NR_VPD;
2190
2191 nd->nd_state = NS_WAIT_QUERY;
2192 break;
2193
2194 /*
2195 * We do serious communication with the server only in
2196 * the READY state.
2197 */
2198
2199 case NS_READY:
2200 b = dgrp_send(nd, count) + local_buf;
2201 break;
2202
2203 /*
2204 * Send off an error after receiving a bogus message
2205 * from the server.
2206 */
2207
2208 case NS_SEND_ERROR:
2209 n = strlen(nd->nd_error);
2210
2211 b[0] = 0xff;
2212 b[1] = n;
2213 memcpy(b + 2, nd->nd_error, n);
2214 b += 2 + n;
2215
2216 dgrp_net_idle(nd);
2217 /*
2218 * Set the active port count to zero.
2219 */
2220 dgrp_chan_count(nd, 0);
2221 break;
2222
2223 default:
2224 break;
2225 }
2226
2227 n = b - local_buf;
2228
2229 if (n != 0) {
2230 nd->nd_send_count++;
2231
2232 nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2233 nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2234 }
2235
2236 rtn = copy_to_user((void __user *)buf, local_buf, n);
2237 if (rtn) {
2238 rtn = -EFAULT;
2239 goto done;
2240 }
2241
2242 *ppos += n;
2243
2244 rtn = n;
2245
2246 if (nd->nd_mon_buf)
2247 dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2248
2249 /*
2250 * Release the NET lock.
2251 */
2252done:
2253 up(&nd->nd_net_semaphore);
2254
2255 return rtn;
2256}
2257
2258/**
2259 * dgrp_receive() -- decode data packets received from the remote PortServer.
2260 * @nd: pointer to a node structure
2261 */
2262static void dgrp_receive(struct nd_struct *nd)
2263{
2264 struct ch_struct *ch;
2265 u8 *buf;
2266 u8 *b;
2267 u8 *dbuf;
2268 char *error;
2269 long port;
2270 long dlen;
2271 long plen;
2272 long remain;
2273 long n;
2274 long mlast;
2275 long elast;
2276 long mstat;
2277 long estat;
2278
2279 char ID[3];
2280
2281 nd->nd_tx_time = jiffies;
2282
2283 ID_TO_CHAR(nd->nd_ID, ID);
2284
2285 b = buf = nd->nd_iobuf;
2286 remain = nd->nd_remain;
2287
2288 /*
2289 * Loop to process Realport protocol packets.
2290 */
2291
2292 while (remain > 0) {
2293 int n0 = b[0] >> 4;
2294 int n1 = b[0] & 0x0f;
2295
2296 if (n0 <= 12) {
2297 port = (nd->nd_rx_module << 4) + n1;
2298
2299 if (port >= nd->nd_chan_count) {
2300 error = "Improper Port Number";
2301 goto prot_error;
2302 }
2303
2304 ch = nd->nd_chan + port;
2305 } else {
2306 port = -1;
2307 ch = NULL;
2308 }
2309
2310 /*
2311 * Process by major packet type.
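 *
 * Of the types handled below: headers 0-7 are one-byte-header data
 * packets carrying n0 + 1 data bytes, 8 and 9 are two- and three-byte
 * header data packets, 10 is a window sequence packet, and 11
 * introduces the command responses.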
2312 */
2313
2314 switch (n0) {
2315
2316 /*
2317 * Process 1-byte header data packet.
2318 */
2319
2320 case 0:
2321 case 1:
2322 case 2:
2323 case 3:
2324 case 4:
2325 case 5:
2326 case 6:
2327 case 7:
2328 dlen = n0 + 1;
2329 plen = dlen + 1;
2330
2331 dbuf = b + 1;
2332 goto data;
2333
2334 /*
2335 * Process 2-byte header data packet.
2336 */
2337
2338 case 8:
2339 if (remain < 3)
2340 goto done;
2341
2342 dlen = b[1];
2343 plen = dlen + 2;
2344
2345 dbuf = b + 2;
2346 goto data;
2347
2348 /*
2349 * Process 3-byte header data packet.
2350 */
2351
2352 case 9:
2353 if (remain < 4)
2354 goto done;
2355
2356 dlen = get_unaligned_be16(b + 1);
2357 plen = dlen + 3;
2358
2359 dbuf = b + 3;
2360
2361 /*
2362 * Common packet handling code.
2363 */
2364
2365data:
2366 nd->nd_tx_work = 1;
2367
2368 /*
2369 * Otherwise data should appear only when we are
2370 * in the CS_READY state.
2371 */
2372
2373 if (ch->ch_state < CS_READY) {
2374 error = "Data received before RWIN established";
2375 goto prot_error;
2376 }
2377
2378 /*
2379 * Assure that the data received is within the
2380 * allowable window.
2381 */
2382
2383 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2384
2385 if (dlen > n) {
2386 error = "Receive data overrun";
2387 goto prot_error;
2388 }
2389
2390 /*
2391 * If we receive 3 or fewer characters,
2392 * assume it is a human typing, and set RTIME
2393 * to 10 milliseconds.
2394 *
2395 * If we receive 10 or more characters,
2396 * assume it's not a human typing, and set RTIME
2397 * to 100 milliseconds.
2398 */
2399
2400 if (ch->ch_edelay != DGRP_RTIME) {
2401 if (ch->ch_rtime != ch->ch_edelay) {
2402 ch->ch_rtime = ch->ch_edelay;
2403 ch->ch_flag |= CH_PARAM;
2404 }
2405 } else if (dlen <= 3) {
2406 if (ch->ch_rtime != 10) {
2407 ch->ch_rtime = 10;
2408 ch->ch_flag |= CH_PARAM;
2409 }
2410 } else {
2411 if (ch->ch_rtime != DGRP_RTIME) {
2412 ch->ch_rtime = DGRP_RTIME;
2413 ch->ch_flag |= CH_PARAM;
2414 }
2415 }
2416
2417 /*
2418 * If a portion of the packet is outside the
2419 * buffer, shorten the effective length of the
2420 * data packet to be the amount of data received.
2421 */
2422
2423 if (remain < plen)
2424 dlen -= plen - remain;
2425
2426 /*
2427 * Detect if receive flush is now complete.
2428 */
2429
2430 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2431 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2432 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2433 ch->ch_flag &= ~CH_RX_FLUSH;
2434 }
2435
2436 /*
2437 * If we are ready to receive, move the data into
2438 * the receive buffer.
2439 */
2440
2441 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2442
2443 if (ch->ch_state == CS_READY &&
2444 (ch->ch_tun.un_open_count != 0) &&
2445 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2446 (ch->ch_cflag & CF_CREAD) != 0 &&
2447 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2448 (ch->ch_send & RR_RX_FLUSH) == 0) {
2449
2450 if (ch->ch_rin + dlen >= RBUF_MAX) {
2451 n = RBUF_MAX - ch->ch_rin;
2452
2453 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2454
2455 ch->ch_rin = 0;
2456 dbuf += n;
2457 dlen -= n;
2458 }
2459
2460 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2461
2462 ch->ch_rin += dlen;
2463
2464
2465 /*
2466 * If we are not in fastcook mode, or
2467 * if there is a fastcook thread
2468 * waiting for data, send the data to
2469 * the line discipline.
2470 */
2471
2472 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2473 ch->ch_inwait != 0) {
2474 dgrp_input(ch);
2475 }
2476
2477 /*
2478 * If there is a read thread waiting
2479 * in select, and we are in fastcook
 2480 * mode, wake it up.
2481 */
2482
2483 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2484 (ch->ch_flag & CH_FAST_READ) != 0)
2485 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2486
2487 /*
2488 * Wake any thread waiting in the
2489 * fastcook loop.
2490 */
2491
2492 if ((ch->ch_flag & CH_INPUT) != 0) {
2493 ch->ch_flag &= ~CH_INPUT;
2494
2495 wake_up_interruptible(&ch->ch_flag_wait);
2496 }
2497 }
2498
2499 /*
2500 * Fabricate and insert a data packet header to
a547e5e0 2501 * precede the remaining data when it comes in.
0b52b749
BP
2502 */
2503
2504 if (remain < plen) {
2505 dlen = plen - remain;
2506 b = buf;
2507
2508 b[0] = 0x90 + n1;
2509 put_unaligned_be16(dlen, b + 1);
2510
2511 remain = 3;
2512 goto done;
2513 }
2514 break;
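 /*
  * A worked example of the split-packet case above, with illustrative
  * numbers and channel nibble n1 == 2:
  *
  *   incoming packet: b[0] = 0x92, dlen = 600, so plen = 603
  *   bytes actually present this pass: remain = 103
  *
  * The 100 payload bytes on hand were consumed above (dlen was already
  * trimmed to remain - 3), then dlen = 603 - 103 = 500 and a synthetic
  * three-byte header
  *
  *   buf[0] = 0x92, buf[1..2] = 0x01 0xf4   (500, big endian)
  *
  * is left at the bottom of the buffer with remain = 3, so the next
  * dgrp_net_write() pass parses the outstanding 500 bytes as an
  * ordinary "case 9" data packet.
  */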
2515
2516 /*
2517 * Handle Window Sequence packets.
2518 */
2519
2520 case 10:
2521 plen = 3;
2522 if (remain < plen)
2523 goto done;
2524
2525 nd->nd_tx_work = 1;
2526
2527 {
2528 ushort tpos = get_unaligned_be16(b + 1);
2529
2530 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2531 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2532 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
2533
2534 if (ch->ch_state < CS_READY || ack > unack) {
2535 error = "Improper Window Sequence";
2536 goto prot_error;
2537 }
2538
2539 ch->ch_s_tpos = tpos;
2540
2541 if (notify <= ack)
2542 ch->ch_s_treq = tpos;
2543 }
2544 break;
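 /*
  * A worked example of the window sequence arithmetic above (all
  * values modulo 2^16, numbers chosen only for illustration):
  *
  *   ch_s_tpos = 1000   (last position the server had acknowledged)
  *   ch_s_tin  = 1500   (total bytes handed to the server so far)
  *   ch_s_treq = 1200   (position at which we asked to be notified)
  *   tpos      = 1300   (new position reported by this packet)
  *
  *   ack    = 300       (newly acknowledged bytes)
  *   unack  = 500       (outstanding bytes; ack > unack would be bogus)
  *   notify = 200       (notify <= ack, so the request was satisfied
  *                       and ch_s_treq advances to 1300)
  */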
2545
2546 /*
2547 * Handle Command response packets.
2548 */
2549
2550 case 11:
2551
2552 /*
2553 * RealPort engine fix - 03/11/2004
2554 *
 2556 * This check was not here originally.
2556 *
2557 * We were using b[1] without verifying that the data
2558 * is actually there and valid. On a split packet, it
2559 * might not be yet.
2560 *
2561 * NOTE: I have never actually seen the failure happen
2562 * under Linux, but since I have seen it occur
2563 * under both Solaris and HP-UX, the assumption
2564 * is that it *could* happen here as well...
2565 */
2566 if (remain < 2)
2567 goto done;
2568
2569
2570 switch (b[1]) {
2571
2572 /*
2573 * Handle Open Response.
2574 */
2575
2576 case 11:
2577 plen = 6;
2578 if (remain < plen)
2579 goto done;
2580
2581 nd->nd_tx_work = 1;
2582
2583 {
2584 int req = b[2];
2585 int resp = b[3];
2586 port = get_unaligned_be16(b + 4);
2587
2588 if (port >= nd->nd_chan_count) {
2589 error = "Open channel number out of range";
2590 goto prot_error;
2591 }
2592
2593 ch = nd->nd_chan + port;
2594
2595 /*
2596 * How we handle an open response depends primarily
2597 * on our current channel state.
2598 */
2599
2600 switch (ch->ch_state) {
2601 case CS_IDLE:
2602
2603 /*
2604 * Handle a delayed open.
2605 */
2606
2607 if (ch->ch_otype_waiting != 0 &&
2608 req == ch->ch_otype_waiting &&
2609 resp == 0) {
2610 ch->ch_otype = req;
2611 ch->ch_otype_waiting = 0;
2612 ch->ch_state = CS_SEND_QUERY;
2613 break;
2614 }
2615 goto open_error;
2616
2617 case CS_WAIT_OPEN:
2618
2619 /*
2620 * Handle the open response.
2621 */
2622
2623 if (req == ch->ch_otype) {
2624 switch (resp) {
2625
2626 /*
2627 * On successful response, open the
2628 * port and proceed normally.
2629 */
2630
2631 case 0:
2632 ch->ch_state = CS_SEND_QUERY;
2633 break;
2634
2635 /*
2636 * On a busy response to a persistent open,
2637 * remember that the open is pending.
2638 */
2639
2640 case 1:
2641 case 2:
2642 if (req != OTYPE_IMMEDIATE) {
2643 ch->ch_otype_waiting = req;
2644 ch->ch_state = CS_IDLE;
2645 break;
2646 }
2647
2648 /*
2649 * Otherwise the server open failed. If
2650 * the Unix port is open, hang it up.
2651 */
2652
2653 default:
2654 if (ch->ch_open_count != 0) {
2655 ch->ch_flag |= CH_HANGUP;
2656 dgrp_carrier(ch);
2657 ch->ch_state = CS_IDLE;
2658 break;
2659 }
2660
2661 ch->ch_open_error = resp;
2662 ch->ch_state = CS_IDLE;
2663
2664 wake_up_interruptible(&ch->ch_flag_wait);
2665 }
2666 break;
2667 }
2668
2669 /*
a547e5e0 2670 * Handle delayed response arrival preceding
0b52b749
BP
2671 * the open response we are waiting for.
2672 */
2673
2674 if (ch->ch_otype_waiting != 0 &&
2675 req == ch->ch_otype_waiting &&
2676 resp == 0) {
2677 ch->ch_otype = ch->ch_otype_waiting;
2678 ch->ch_otype_waiting = 0;
2679 ch->ch_state = CS_WAIT_FAIL;
2680 break;
2681 }
2682 goto open_error;
2683
2684
2685 case CS_WAIT_FAIL:
2686
2687 /*
2688 * Handle response to immediate open arriving
2689 * after a delayed open success.
2690 */
2691
2692 if (req == OTYPE_IMMEDIATE) {
2693 ch->ch_state = CS_SEND_QUERY;
2694 break;
2695 }
2696 goto open_error;
2697
2698
2699 case CS_WAIT_CANCEL:
2700 /*
2701 * Handle delayed open response arriving before
2702 * the cancel response.
2703 */
2704
2705 if (req == ch->ch_otype_waiting &&
2706 resp == 0) {
2707 ch->ch_otype_waiting = 0;
2708 break;
2709 }
2710
2711 /*
2712 * Handle cancel response.
2713 */
2714
2715 if (req == 4 && resp == 0) {
2716 ch->ch_otype_waiting = 0;
2717 ch->ch_state = CS_IDLE;
2718 break;
2719 }
2720 goto open_error;
2721
2722
2723 case CS_WAIT_CLOSE:
2724 /*
2725 * Handle a successful response to a port
2726 * close.
2727 */
2728
2729 if (req >= 3) {
2730 ch->ch_state = CS_IDLE;
2731 break;
2732 }
2733 goto open_error;
2734
2735open_error:
2736 default:
2737 {
2738 error = "Improper Open Response";
2739 goto prot_error;
2740 }
2741 }
2742 }
2743 break;
2744
2745 /*
2746 * Handle Synchronize Response.
2747 */
2748
2749 case 13:
2750 plen = 3;
2751 if (remain < plen)
2752 goto done;
2753 {
2754 int seq = b[2];
2755 int s;
2756
2757 /*
2758 * If channel was waiting for this sync response,
2759 * unset the flag, and wake up anyone waiting
2760 * on the event.
2761 */
2762 if (ch->ch_flag & CH_WAITING_SYNC) {
2763 ch->ch_flag &= ~(CH_WAITING_SYNC);
2764 wake_up_interruptible(&ch->ch_flag_wait);
2765 }
2766
2767 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2768 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2769 break;
2770 }
2771
2772 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2773 if (nd->nd_seq_wait[s] != 0) {
2774 nd->nd_seq_wait[s] = 0;
2775
2776 wake_up_interruptible(&nd->nd_seq_wque[s]);
2777 }
2778
2779 nd->nd_unack -= nd->nd_seq_size[s];
2780
2781 if (s == seq)
2782 break;
2783 }
2784
2785 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2786 }
2787 break;
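 /*
  * A worked example of the sequence walk above, assuming a 16-entry
  * sequence window (SEQ_MASK == 15) purely for illustration:
  *
  *   nd_seq_out = 14, nd_seq_in = 3   (14, 15, 0, 1, 2 outstanding)
  *   a sync response with seq = 1 arrives:
  *
  *     (1 - 14) & 15 = 3  <  (3 - 14) & 15 = 5, so it is in range;
  *     the loop visits s = 14, 15, 0, 1, waking any waiter and
  *     subtracting nd_seq_size[s] from nd_unack, then nd_seq_out
  *     becomes 2.
  *
  * A seq value outside the outstanding range is simply dropped by the
  * break above.
  */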
2788
2789 /*
2790 * Handle Sequence Response.
2791 */
2792
2793 case 15:
2794 plen = 6;
2795 if (remain < plen)
2796 goto done;
2797
2798 {
2799 /* Record that we have received the Sequence
2800 * Response, but we aren't interested in the
2801 * sequence numbers. We were using RIN like it
2802 * was ROUT and that was causing problems,
2803 * fixed 7-13-2001 David Fries. See comment in
2804 * drp.h for ch_s_rin variable.
2805 int rin = get_unaligned_be16(b + 2);
2806 int tpos = get_unaligned_be16(b + 4);
2807 */
2808
2809 ch->ch_send &= ~RR_SEQUENCE;
2810 ch->ch_expect &= ~RR_SEQUENCE;
2811 }
2812 goto check_query;
2813
2814 /*
2815 * Handle Status Response.
2816 */
2817
2818 case 17:
2819 plen = 5;
2820 if (remain < plen)
2821 goto done;
2822
2823 {
2824 ch->ch_s_elast = get_unaligned_be16(b + 2);
2825 ch->ch_s_mlast = b[4];
2826
2827 ch->ch_expect &= ~RR_STATUS;
2828 ch->ch_send &= ~RR_STATUS;
2829
2830 /*
2831 * CH_PHYS_CD is cleared because something _could_ be
2832 * waiting for the initial sense of carrier... and if
2833 * carrier is high immediately, we want to be sure to
2834 * wake them as soon as possible.
2835 */
2836 ch->ch_flag &= ~CH_PHYS_CD;
2837
2838 dgrp_carrier(ch);
2839 }
2840 goto check_query;
2841
2842 /*
2843 * Handle Line Error Response.
2844 */
2845
2846 case 19:
2847 plen = 14;
2848 if (remain < plen)
2849 goto done;
2850
2851 break;
2852
2853 /*
2854 * Handle Buffer Response.
2855 */
2856
2857 case 21:
2858 plen = 6;
2859 if (remain < plen)
2860 goto done;
2861
2862 {
2863 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2864 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2865
2866 ch->ch_send &= ~RR_BUFFER;
2867 ch->ch_expect &= ~RR_BUFFER;
2868 }
2869 goto check_query;
2870
2871 /*
2872 * Handle Port Capability Response.
2873 */
2874
2875 case 23:
2876 plen = 32;
2877 if (remain < plen)
2878 goto done;
2879
2880 {
2881 ch->ch_send &= ~RR_CAPABILITY;
2882 ch->ch_expect &= ~RR_CAPABILITY;
2883 }
2884
2885 /*
2886 * When all queries are complete, set those parameters
2887 * derived from the query results, then transition
2888 * to the READY state.
2889 */
2890
2891check_query:
2892 if (ch->ch_state == CS_WAIT_QUERY &&
2893 (ch->ch_expect & (RR_SEQUENCE |
2894 RR_STATUS |
2895 RR_BUFFER |
2896 RR_CAPABILITY)) == 0) {
2897 ch->ch_tmax = ch->ch_s_tsize / 4;
2898
2899 if (ch->ch_edelay == DGRP_TTIME)
2900 ch->ch_ttime = DGRP_TTIME;
2901 else
2902 ch->ch_ttime = ch->ch_edelay;
2903
2904 ch->ch_rmax = ch->ch_s_rsize / 4;
2905
2906 if (ch->ch_edelay == DGRP_RTIME)
2907 ch->ch_rtime = DGRP_RTIME;
2908 else
2909 ch->ch_rtime = ch->ch_edelay;
2910
2911 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2912 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
2913
2914 ch->ch_state = CS_READY;
2915
2916 nd->nd_tx_work = 1;
2917 wake_up_interruptible(&ch->ch_flag_wait);
2918
2919 }
2920 break;
2921
2922 default:
2923 goto decode_error;
2924 }
2925 break;
2926
2927 /*
2928 * Handle Events.
2929 */
2930
2931 case 12:
2932 plen = 4;
2933 if (remain < plen)
2934 goto done;
2935
2936 mlast = ch->ch_s_mlast;
2937 elast = ch->ch_s_elast;
2938
2939 mstat = ch->ch_s_mlast = b[1];
2940 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2941
2942 /*
2943 * Handle modem changes.
2944 */
2945
2946 if (((mstat ^ mlast) & DM_CD) != 0)
2947 dgrp_carrier(ch);
2948
2949
2950 /*
2951 * Handle received break.
2952 */
2953
2954 if ((estat & ~elast & EV_RXB) != 0 &&
2955 (ch->ch_tun.un_open_count != 0) &&
2956 I_BRKINT(ch->ch_tun.un_tty) &&
2957 !(I_IGNBRK(ch->ch_tun.un_tty))) {
2958
227434f8 2959 tty_buffer_request_room(&ch->port, 1);
0b52b749
BP
2960 tty_insert_flip_char(ch->ch_tun.un_tty, 0, TTY_BREAK);
2961 tty_flip_buffer_push(ch->ch_tun.un_tty);
2962
2963 }
2964
2965 /*
2966 * On transmit break complete, if more break traffic
2967 * is waiting then send it. Otherwise wake any threads
2968 * waiting for transmitter empty.
2969 */
2970
2971 if ((~estat & elast & EV_TXB) != 0 &&
2972 (ch->ch_expect & RR_TX_BREAK) != 0) {
2973
2974 nd->nd_tx_work = 1;
2975
2976 ch->ch_expect &= ~RR_TX_BREAK;
2977
2978 if (ch->ch_break_time != 0) {
2979 ch->ch_send |= RR_TX_BREAK;
2980 } else {
2981 ch->ch_send &= ~RR_TX_BREAK;
2982 ch->ch_flag &= ~CH_TX_BREAK;
2983 wake_up_interruptible(&ch->ch_flag_wait);
2984 }
2985 }
2986 break;
2987
2988 case 13:
2989 case 14:
2990 error = "Unrecognized command";
2991 goto prot_error;
2992
2993 /*
2994 * Decode Special Codes.
2995 */
2996
2997 case 15:
2998 switch (n1) {
2999 /*
3000 * One byte module select.
3001 */
3002
3003 case 0:
3004 case 1:
3005 case 2:
3006 case 3:
3007 case 4:
3008 case 5:
3009 case 6:
3010 case 7:
3011 plen = 1;
3012 nd->nd_rx_module = n1;
3013 break;
3014
3015 /*
3016 * Two byte module select.
3017 */
3018
3019 case 8:
3020 plen = 2;
3021 if (remain < plen)
3022 goto done;
3023
3024 nd->nd_rx_module = b[1];
3025 break;
3026
3027 /*
3028 * ID Request packet.
3029 */
3030
3031 case 11:
3032 if (remain < 4)
3033 goto done;
3034
3035 plen = get_unaligned_be16(b + 2);
3036
3037 if (plen < 12 || plen > 1000) {
3038 error = "Response Packet length error";
3039 goto prot_error;
3040 }
3041
3042 nd->nd_tx_work = 1;
3043
3044 switch (b[1]) {
3045 /*
3046 * Echo packet.
3047 */
3048
3049 case 0:
3050 nd->nd_send |= NR_ECHO;
3051 break;
3052
3053 /*
3054 * ID Response packet.
3055 */
3056
3057 case 1:
3058 nd->nd_send |= NR_IDENT;
3059 break;
3060
3061 /*
 3062 * Password packet.
3063 */
3064
3065 case 32:
3066 nd->nd_send |= NR_PASSWORD;
3067 break;
3068
3069 }
3070 break;
3071
3072 /*
3073 * Various node-level response packets.
3074 */
3075
3076 case 12:
3077 if (remain < 4)
3078 goto done;
3079
3080 plen = get_unaligned_be16(b + 2);
3081
3082 if (plen < 4 || plen > 1000) {
3083 error = "Response Packet length error";
3084 goto prot_error;
3085 }
3086
3087 nd->nd_tx_work = 1;
3088
3089 switch (b[1]) {
3090 /*
3091 * Echo packet.
3092 */
3093
3094 case 0:
3095 nd->nd_expect &= ~NR_ECHO;
3096 break;
3097
3098 /*
3099 * Product Response Packet.
3100 */
3101
3102 case 1:
3103 {
3104 int desclen;
3105
3106 nd->nd_hw_ver = (b[8] << 8) | b[9];
3107 nd->nd_sw_ver = (b[10] << 8) | b[11];
3108 nd->nd_hw_id = b[6];
3109 desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
3110 plen - 12;
ad0c6e36
BP
3111
3112 if (desclen <= 0) {
3113 error = "Response Packet desclen error";
3114 goto prot_error;
3115 }
3116
0b52b749
BP
3117 strncpy(nd->nd_ps_desc, b + 12, desclen);
3118 nd->nd_ps_desc[desclen] = 0;
3119 }
3120
3121 nd->nd_expect &= ~NR_IDENT;
3122 break;
3123
3124 /*
3125 * Capability Response Packet.
3126 */
3127
3128 case 2:
3129 {
3130 int nn = get_unaligned_be16(b + 4);
3131
3132 if (nn > CHAN_MAX)
3133 nn = CHAN_MAX;
3134
3135 dgrp_chan_count(nd, nn);
3136 }
3137
3138 nd->nd_expect &= ~NR_CAPABILITY;
3139 break;
3140
3141 /*
3142 * VPD Response Packet.
3143 */
3144
3145 case 15:
3146 /*
3147 * NOTE: case 15 is here ONLY because the EtherLite
3148 * is broken, and sends a response to 24 back as 15.
3149 * To resolve this, the EtherLite firmware is now
3150 * fixed to send back 24 correctly, but, for backwards
3151 * compatibility, we now have reserved 15 for the
3152 * bad EtherLite response to 24 as well.
3153 */
3154
3155 /* Fallthru! */
3156
3157 case 24:
3158
3159 /*
3160 * If the product doesn't support VPD,
3161 * it will send back a null IDRESP,
3162 * which is a length of 4 bytes.
3163 */
3164 if (plen > 4) {
3165 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3166 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3167 }
3168
3169 nd->nd_expect &= ~NR_VPD;
3170 break;
3171
3172 default:
3173 goto decode_error;
3174 }
3175
3176 if (nd->nd_expect == 0 &&
3177 nd->nd_state == NS_WAIT_QUERY) {
3178 nd->nd_state = NS_READY;
3179 }
3180 break;
3181
3182 /*
3183 * Debug packet.
3184 */
3185
3186 case 14:
3187 if (remain < 4)
3188 goto done;
3189
3190 plen = get_unaligned_be16(b + 2) + 4;
3191
3192 if (plen > 1000) {
3193 error = "Debug Packet too large";
3194 goto prot_error;
3195 }
3196
3197 if (remain < plen)
3198 goto done;
3199 break;
3200
3201 /*
3202 * Handle reset packet.
3203 */
3204
3205 case 15:
3206 if (remain < 2)
3207 goto done;
3208
3209 plen = 2 + b[1];
3210
3211 if (remain < plen)
3212 goto done;
3213
3214 nd->nd_tx_work = 1;
3215
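 /*
  * The byte just past the reset message is saved, overwritten with a
  * NUL, and then immediately restored.  Nothing consumes the
  * NUL-terminated string in between, so this looks like a leftover
  * from a removed debug or log statement.
  */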
3216 n = b[plen];
3217 b[plen] = 0;
3218
3219 b[plen] = n;
3220
3221 error = "Client Reset Acknowledge";
3222 goto prot_error;
3223
3224 default:
3225 goto decode_error;
3226 }
3227 break;
3228
3229 default:
3230 goto decode_error;
3231 }
3232
3233 b += plen;
3234 remain -= plen;
3235 }
3236
3237 /*
3238 * When the buffer is exhausted, copy any data left at the
3239 * top of the buffer back down to the bottom for the next
3240 * read request.
3241 */
3242
3243done:
3244 if (remain > 0 && b != buf)
3245 memcpy(buf, b, remain);
3246
3247 nd->nd_remain = remain;
3248 return;
3249
3250/*
3251 * Handle a decode error.
3252 */
3253
3254decode_error:
3255 error = "Protocol decode error";
3256
3257/*
3258 * Handle a general protocol error.
3259 */
3260
3261prot_error:
3262 nd->nd_remain = 0;
3263 nd->nd_state = NS_SEND_ERROR;
3264 nd->nd_error = error;
3265}
3266
3267/*
3268 * dgrp_net_write() -- write data to the network device.
3269 *
3270 * A zero byte write indicates that the connection to the RealPort
3271 * device has been broken.
3272 *
3273 * A non-zero write indicates data from the RealPort device.
3274 */
3275static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
3276 size_t count, loff_t *ppos)
3277{
3278 struct nd_struct *nd;
3279 ssize_t rtn = 0;
3280 long n;
3281 long total = 0;
3282
3283 /*
3284 * Get the node pointer, and quit if it doesn't exist.
3285 */
3286 nd = (struct nd_struct *)(file->private_data);
3287 if (!nd)
3288 return -ENXIO;
3289
3290 /*
3291 * Grab the NET lock.
3292 */
3293 down(&nd->nd_net_semaphore);
3294
3295 nd->nd_write_count++;
3296
3297 /*
3298 * Handle disconnect.
3299 */
3300
3301 if (count == 0) {
3302 dgrp_net_idle(nd);
3303 /*
3304 * Set the active port count to zero.
3305 */
3306 dgrp_chan_count(nd, 0);
3307 goto unlock;
3308 }
3309
3310 /*
3311 * Loop to process entire receive packet.
3312 */
3313
3314 while (count > 0) {
3315 n = UIO_MAX - nd->nd_remain;
3316
3317 if (n > count)
3318 n = count;
3319
3320 nd->nd_rx_byte += n + nd->nd_link.lk_header_size;
3321
3322 rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
3323 (void __user *) buf + total, n);
3324 if (rtn) {
3325 rtn = -EFAULT;
3326 goto unlock;
3327 }
3328
3329 *ppos += n;
3330
3331 total += n;
3332
3333 count -= n;
3334
3335 if (nd->nd_mon_buf)
3336 dgrp_monitor_data(nd, RPDUMP_SERVER,
3337 nd->nd_iobuf + nd->nd_remain, n);
3338
3339 nd->nd_remain += n;
3340
3341 dgrp_receive(nd);
3342 }
3343
3344 rtn = total;
3345
3346unlock:
3347 /*
3348 * Release the NET lock.
3349 */
3350 up(&nd->nd_net_semaphore);
3351
3352 return rtn;
3353}
3354
3355
3356/*
3357 * dgrp_net_select()
3358 * Determine whether a device is ready to be read or written to, and
3359 * sleep if not.
3360 */
3361static unsigned int dgrp_net_select(struct file *file,
3362 struct poll_table_struct *table)
3363{
3364 unsigned int retval = 0;
3365 struct nd_struct *nd = file->private_data;
3366
3367 poll_wait(file, &nd->nd_tx_waitq, table);
3368
3369 if (nd->nd_tx_ready)
3370 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3371
3372 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3373
3374 return retval;
3375}
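/*
 * A daemon built on this interface might look roughly like the sketch
 * below.  This is only an illustration: the /proc path is a
 * placeholder, the buffer size is arbitrary, error handling is
 * omitted, and connect_to_portserver() is a hypothetical helper that
 * stands in for opening the TCP connection to the PortServer.  The
 * device is always writable; POLLIN is raised when the driver has
 * protocol data ready.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	int fd = open("/proc/dgrp/net/<node>", O_RDWR);
 *	int sock_fd = connect_to_portserver();
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		if (pfd.revents & POLLIN) {
 *			ssize_t n = read(fd, buf, sizeof(buf));
 *			write(sock_fd, buf, n);
 *		}
 *	}
 *
 * Data arriving from the PortServer socket is simply write()n to fd,
 * and a zero-byte write() tells the driver that the connection to the
 * PortServer has been broken (see dgrp_net_write below).
 */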
3376
3377/*
3378 * dgrp_net_ioctl
3379 *
3380 * Implement those functions which allow the network daemon to control
3381 * the network parameters in the driver. The ioctls include ones to
3382 * get and set the link speed parameters for the PortServer.
3383 */
3384static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
3385 unsigned long arg)
3386{
3387 struct nd_struct *nd;
3388 int rtn = 0;
3389 long size = _IOC_SIZE(cmd);
3390 struct link_struct link;
3391
3392 nd = file->private_data;
3393
3394 if (_IOC_DIR(cmd) & _IOC_READ)
3395 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3396 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3397 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3398
3399 if (!rtn)
3400 return rtn;
3401
3402 switch (cmd) {
3403 case DIGI_SETLINK:
3404 if (size != sizeof(struct link_struct))
3405 return -EINVAL;
3406
3407 if (copy_from_user((void *)(&link), (void __user *) arg, size))
3408 return -EFAULT;
3409
3410 if (link.lk_fast_rate < 9600)
3411 link.lk_fast_rate = 9600;
3412
3413 if (link.lk_slow_rate < 2400)
3414 link.lk_slow_rate = 2400;
3415
3416 if (link.lk_fast_rate > 10000000)
3417 link.lk_fast_rate = 10000000;
3418
3419 if (link.lk_slow_rate > link.lk_fast_rate)
3420 link.lk_slow_rate = link.lk_fast_rate;
3421
3422 if (link.lk_fast_delay > 2000)
3423 link.lk_fast_delay = 2000;
3424
3425 if (link.lk_slow_delay > 10000)
3426 link.lk_slow_delay = 10000;
3427
3428 if (link.lk_fast_delay < 60)
3429 link.lk_fast_delay = 60;
3430
3431 if (link.lk_slow_delay < link.lk_fast_delay)
3432 link.lk_slow_delay = link.lk_fast_delay;
3433
3434 if (link.lk_header_size < 2)
3435 link.lk_header_size = 2;
3436
3437 if (link.lk_header_size > 128)
3438 link.lk_header_size = 128;
3439
3440 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3441 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3442
3443 link.lk_fast_delay /= dgrp_poll_tick;
3444 link.lk_slow_delay /= dgrp_poll_tick;
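	/*
	 * The divisions above turn the user-supplied units into per-tick
	 * quantities.  With an illustrative poll tick of 20 ms (the value
	 * of dgrp_poll_tick is assumed here, not taken from the source):
	 *
	 *   lk_fast_rate = 115200 bps
	 *       -> 115200 / (8 * 1000 / 20) = 115200 / 400
	 *        = 288 bytes per poll tick
	 *
	 *   lk_fast_delay = 60 ms -> 60 / 20 = 3 poll ticks
	 *
	 * so dgrp_poll_handler() below can work directly in bytes per
	 * tick and in ticks, with no further scaling.
	 */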
3445
3446 nd->nd_link = link;
3447
3448 break;
3449
3450 case DIGI_GETLINK:
3451 if (size != sizeof(struct link_struct))
3452 return -EINVAL;
3453
3454 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3455 size))
3456 return -EFAULT;
3457
3458 break;
3459
3460 default:
3461 return -EINVAL;
3462
3463 }
3464
3465 return 0;
3466}
3467
3468/**
3469 * dgrp_poll_handler() -- handler for poll timer
3470 *
3471 * As each timer expires, it determines (a) whether the "transmit"
3472 * waiter needs to be woken up, and (b) whether the poller needs to
3473 * be rescheduled.
3474 */
3475void dgrp_poll_handler(unsigned long arg)
3476{
3477 struct dgrp_poll_data *poll_data;
3478 struct nd_struct *nd;
3479 struct link_struct *lk;
3480 ulong time;
3481 ulong poll_time;
3482 ulong freq;
3483 ulong lock_flags;
3484
3485 poll_data = (struct dgrp_poll_data *) arg;
3486 freq = 1000 / poll_data->poll_tick;
3487 poll_data->poll_round += 17;
3488
3489 if (poll_data->poll_round >= freq)
3490 poll_data->poll_round -= freq;
3491
3492 /*
3493 * Loop to process all open nodes.
3494 *
3495 * For each node, determine the rate at which it should
3496 * be transmitting data. Then if the node should wake up
3497 * and transmit data now, enable the net receive select
3498 * to get the transmit going.
3499 */
3500
3501 list_for_each_entry(nd, &nd_struct_list, list) {
3502
3503 lk = &nd->nd_link;
3504
3505 /*
3506 * Decrement statistics. These are only for use with
3507 * KME, so don't worry that the operations are done
a547e5e0 3508 * unlocked and the results are occasionally wrong.
0b52b749
BP
3509 */
3510
3511 nd->nd_read_count -= (nd->nd_read_count +
3512 poll_data->poll_round) / freq;
3513 nd->nd_write_count -= (nd->nd_write_count +
3514 poll_data->poll_round) / freq;
3515 nd->nd_send_count -= (nd->nd_send_count +
3516 poll_data->poll_round) / freq;
3517 nd->nd_tx_byte -= (nd->nd_tx_byte +
3518 poll_data->poll_round) / freq;
3519 nd->nd_rx_byte -= (nd->nd_rx_byte +
3520 poll_data->poll_round) / freq;
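	/*
	 * A quick sanity check of the decay above, assuming a 20 ms poll
	 * tick (freq == 50; the value is illustrative only):
	 *
	 *   a counter of 500 loses (500 + poll_round) / 50, about 10, per
	 *   tick, i.e. roughly 1/freq of its value, so bursts decay
	 *   exponentially with a time constant of about one second;
	 *
	 *   a counter of 3 usually loses nothing, but on ticks where
	 *   poll_round has climbed to 47 or more the division rounds up
	 *   to 1, so small residues still drain to zero rather than
	 *   sticking forever.
	 */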
3521
3522 /*
3523 * Wake the daemon to transmit data only when there is
3524 * enough byte credit to send data.
3525 *
3526 * The results are approximate because the operations
3527 * are performed unlocked, and we are inspecting
3528 * data asynchronously updated elsewhere. The whole
3529 * thing is just approximation anyway, so that should
3530 * be okay.
3531 */
3532
3533 if (lk->lk_slow_rate >= UIO_MAX) {
3534
3535 nd->nd_delay = 0;
3536 nd->nd_rate = UIO_MAX;
3537
3538 nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
3539 nd->nd_tx_credit = 3 * UIO_MAX;
3540
3541 } else {
3542
3543 long rate;
3544 long delay;
3545 long deposit;
3546 long charge;
3547 long size;
3548 long excess;
3549
3550 long seq_in = nd->nd_seq_in;
3551 long seq_out = nd->nd_seq_out;
3552
3553 /*
3554 * If there are no outstanding packets, run at the
3555 * fastest rate.
3556 */
3557
3558 if (seq_in == seq_out) {
3559 delay = 0;
3560 rate = lk->lk_fast_rate;
3561 }
3562
3563 /*
3564 * Otherwise compute the transmit rate based on the
3565 * delay since the oldest packet.
3566 */
3567
3568 else {
3569 /*
3570 * The actual delay is computed as the
3571 * time since the oldest unacknowledged
3572 * packet was sent, minus the time it
3573 * took to send that packet to the server.
3574 */
3575
3576 delay = ((jiffies - nd->nd_seq_time[seq_out])
3577 - (nd->nd_seq_size[seq_out] /
3578 lk->lk_fast_rate));
3579
3580 /*
3581 * If the delay is less than the "fast"
3582 * delay, transmit full speed. If greater
3583 * than the "slow" delay, transmit at the
3584 * "slow" speed. In between, interpolate
3585 * between the fast and slow speeds.
3586 */
3587
3588 rate =
3589 (delay <= lk->lk_fast_delay ?
3590 lk->lk_fast_rate :
3591 delay >= lk->lk_slow_delay ?
3592 lk->lk_slow_rate :
3593 (lk->lk_slow_rate +
3594 (lk->lk_slow_delay - delay) *
3595 (lk->lk_fast_rate - lk->lk_slow_rate) /
3596 (lk->lk_slow_delay - lk->lk_fast_delay)
3597 )
3598 );
3599 }
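		/*
		 * A worked example of the interpolation above, reusing the
		 * illustrative per-tick figures of 288 (fast) and 6 (slow)
		 * bytes per tick, lk_fast_delay = 3 ticks and
		 * lk_slow_delay = 10 ticks:
		 *
		 *   delay = 2  -> rate = 288                  (full speed)
		 *   delay = 5  -> rate = 6 + 5 * 282 / 7 = 207
		 *   delay = 12 -> rate = 6                    (slow floor)
		 *
		 * so the longer the oldest packet has gone unacknowledged,
		 * the more gently the link is fed.
		 */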
3600
3601 nd->nd_delay = delay;
3602 nd->nd_rate = rate;
3603
3604 /*
3605 * Increase the transmit credit by depositing the
3606 * current transmit rate.
3607 */
3608
3609 deposit = nd->nd_tx_deposit;
3610 charge = nd->nd_tx_charge;
3611
3612 deposit += rate;
3613
3614 /*
3615 * If the available transmit credit becomes too large,
3616 * reduce the deposit to correct the value.
3617 *
3618 * Too large is the max of:
3619 * 6 times the header size
3620 * 3 times the current transmit rate.
3621 */
3622
3623 size = 2 * nd->nd_link.lk_header_size;
3624
3625 if (size < rate)
3626 size = rate;
3627
3628 size *= 3;
3629
3630 excess = deposit - charge - size;
3631
3632 if (excess > 0)
3633 deposit -= excess;
3634
3635 nd->nd_tx_deposit = deposit;
3636 nd->nd_tx_credit = deposit - charge;
3637
3638 /*
3639 * Wake the transmit task only if the transmit credit
3640 * is at least 3 times the transmit header size.
3641 */
3642
3643 size = 3 * lk->lk_header_size;
3644
3645 if (nd->nd_tx_credit < size)
3646 continue;
3647 }
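		/*
		 * A worked pass through the credit logic, with illustrative
		 * numbers (50-byte headers, rate 207 bytes per tick, 1000
		 * bytes already charged, previous deposit 1100):
		 *
		 *   deposit = 1100 + 207 = 1307
		 *   cap     = 3 * max(2 * 50, 207) = 621
		 *   excess  = 1307 - 1000 - 621 = -314   (no clamp needed)
		 *   credit  = 1307 - 1000 = 307
		 *
		 * 307 >= 3 * 50, so this node may go on to wake the daemon;
		 * a node whose credit is still below three header sizes is
		 * skipped by the continue above.
		 */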
3648
3649
3650 /*
3651 * Enable the READ select to wake the daemon if there
3652 * is useful work for the drp_read routine to perform.
3653 */
3654
3655 if (waitqueue_active(&nd->nd_tx_waitq) &&
3656 (nd->nd_tx_work != 0 ||
3657 (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
3658 nd->nd_tx_ready = 1;
3659
3660 wake_up_interruptible(&nd->nd_tx_waitq);
3661
3662 /* not needed */
3663 /* nd->nd_flag &= ~ND_SELECT; */
3664 }
3665 }
3666
3667
3668 /*
3669 * Schedule ourself back at the nominal wakeup interval.
3670 */
3671 spin_lock_irqsave(&poll_data->poll_lock, lock_flags);
3672
3673 poll_data->node_active_count--;
3674 if (poll_data->node_active_count > 0) {
3675 poll_data->node_active_count++;
3676 poll_time = poll_data->timer.expires +
3677 poll_data->poll_tick * HZ / 1000;
3678
3679 time = poll_time - jiffies;
3680
3681 if (time >= 2 * poll_data->poll_tick)
3682 poll_time = jiffies + dgrp_poll_tick * HZ / 1000;
3683
3684 poll_data->timer.expires = poll_time;
3685 add_timer(&poll_data->timer);
3686 }
3687
3688 spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);
3689}