1 /*
2 *
3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
15 *
16 */
17
18 /*
19 *
20 * Filename:
21 *
22 * dgrp_net_ops.c
23 *
24 * Description:
25 *
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
28 * in "/proc".
29 *
30 * Author:
31 *
32 * James A. Puzzo
33 *
34 */
35
36 #include <linux/module.h>
37 #include <linux/proc_fs.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40 #include <linux/device.h>
41 #include <linux/tty.h>
42 #include <linux/tty_flip.h>
43 #include <linux/spinlock.h>
44 #include <linux/poll.h>
45 #include <linux/sched.h>
46 #include <linux/ratelimit.h>
47 #include <asm/unaligned.h>
48
49 #define MYFLIPLEN TBUF_MAX
50
51 #include "dgrp_common.h"
52
53 #define TTY_FLIPBUF_SIZE 512
54 #define DEVICE_NAME_SIZE 50
55
56 /*
57 * Generic helper function declarations
58 */
59 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
60 unsigned char *fbuf, int *len);
61
62 /*
63 * File operation declarations
64 */
65 static int dgrp_net_open(struct inode *, struct file *);
66 static int dgrp_net_release(struct inode *, struct file *);
67 static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
68 static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
69 loff_t *);
70 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
71 unsigned long arg);
72 static unsigned int dgrp_net_select(struct file *file,
73 struct poll_table_struct *table);
74
75 const struct file_operations dgrp_net_ops = {
76 .owner = THIS_MODULE,
77 .read = dgrp_net_read,
78 .write = dgrp_net_write,
79 .poll = dgrp_net_select,
80 .unlocked_ioctl = dgrp_net_ioctl,
81 .open = dgrp_net_open,
82 .release = dgrp_net_release,
83 };
84
85 /**
86 * dgrp_dump() -- prints memory for debugging purposes.
87 * @mem: Memory location which should be printed to the console
88 * @len: Number of bytes to be dumped
89 */
90 static void dgrp_dump(u8 *mem, int len)
91 {
92 int i;
93
94 pr_debug("dgrp dump length = %d, data = ", len);
95 for (i = 0; i < len; ++i)
96 pr_debug("%.2x ", mem[i]);
97 pr_debug("\n");
98 }
99
100 /**
101 * dgrp_read_data_block() -- Read a data block
102 * @ch: struct ch_struct *
103 * @flipbuf: u8 *
104 * @flipbuf_size: size of flipbuf
105 */
106 static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
107 int flipbuf_size)
108 {
109 int t;
110 int n;
111
112 if (flipbuf_size <= 0)
113 return;
114
115 t = RBUF_MAX - ch->ch_rout;
116 n = flipbuf_size;
117
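/*
 * The receive ring buffer may wrap; if so, copy the tail segment up to
 * RBUF_MAX first, reset ch_rout, and fall through to copy the rest
 * from the start of the buffer.
 */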
118 if (n >= t) {
119 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t);
120 flipbuf += t;
121 n -= t;
122 ch->ch_rout = 0;
123 }
124
125 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
126 flipbuf += n;
127 ch->ch_rout += n;
128 }
129
130
131 /**
132 * dgrp_input() -- send data to the line discipline
133 * @ch: pointer to channel struct
134 *
135 * Copies the rbuf to the flipbuf and sends the input
136 * buffer data to the line discipline.
137 *
138 */
139 static void dgrp_input(struct ch_struct *ch)
140 {
141 struct nd_struct *nd;
142 struct tty_struct *tty;
143 int data_len;
144 int len;
145 int tty_count;
146 ulong lock_flags;
147 u8 *myflipbuf;
148 u8 *myflipflagbuf;
149
150 if (!ch)
151 return;
152
153 nd = ch->ch_nd;
154
155 if (!nd)
156 return;
157
158 spin_lock_irqsave(&nd->nd_lock, lock_flags);
159
160 myflipbuf = nd->nd_inputbuf;
161 myflipflagbuf = nd->nd_inputflagbuf;
162
163 if (!ch->ch_open_count) {
164 ch->ch_rout = ch->ch_rin;
165 goto out;
166 }
167
168 if (ch->ch_tun.un_flag & UN_CLOSING) {
169 ch->ch_rout = ch->ch_rin;
170 goto out;
171 }
172
173 tty = (ch->ch_tun).un_tty;
174
175
176 if (!tty || tty->magic != TTY_MAGIC) {
177 ch->ch_rout = ch->ch_rin;
178 goto out;
179 }
180
181 tty_count = tty->count;
182 if (!tty_count) {
183 ch->ch_rout = ch->ch_rin;
184 goto out;
185 }
186
187 if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
188 ch->ch_rout = ch->ch_rin;
189 goto out;
190 }
191
192 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
193
194 /* data_len should be the number of chars that we read in */
195 data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
196
197 /* len is the amount of data we are going to transfer here */
198 len = tty_buffer_request_room(&ch->port, data_len);
199
200 /* Check DPA flow control */
201 if ((nd->nd_dpa_debug) &&
202 (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
203 (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
204 len = 0;
205
206 if ((len) && !(ch->ch_flag & CH_RXSTOP)) {
207
208 dgrp_read_data_block(ch, myflipbuf, len);
209
210 if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
211 parity_scan(ch, myflipbuf, myflipflagbuf, &len);
212 else
213 memset(myflipflagbuf, TTY_NORMAL, len);
214
215 if ((nd->nd_dpa_debug) &&
216 (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
217 dgrp_dpa_data(nd, 1, myflipbuf, len);
218
219 tty_insert_flip_string_flags(&ch->port, myflipbuf,
220 myflipflagbuf, len);
221 tty_flip_buffer_push(&ch->port);
222
223 ch->ch_rxcount += len;
224 }
225
226 /*
227 * Wake up any sleepers (maybe dgrp close) that might be waiting
228 * for a channel flag state change.
229 */
230 wake_up_interruptible(&ch->ch_flag_wait);
231 return;
232
233 out:
234 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
235 }
236
237
238 /*
239 * parity_scan
240 *
241 * Loop to inspect each single character or 0xFF escape.
242 *
243 * if PARMRK & ~DOSMODE:
244 * 0xFF 0xFF Normal 0xFF character, escaped
245 * to eliminate confusion.
246 * 0xFF 0x00 0x00 Break
247 * 0xFF 0x00 CC Error character CC.
248 * CC Normal character CC.
249 *
250 * if PARMRK & DOSMODE:
251 * 0xFF 0x18 0x00 Break
252 * 0xFF 0x08 0x00 Framing Error
253 * 0xFF 0x04 0x00 Parity error
254 * 0xFF 0x0C 0x00 Both Framing and Parity error
255 *
256 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
257 * as per protocol
258 */
259 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
260 unsigned char *fbuf, int *len)
261 {
262 int l = *len;
263 int count = 0;
264 int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
265 unsigned char *cout; /* character buffer */
266 unsigned char *fout; /* flag buffer */
267 unsigned char *in;
268 unsigned char c;
269
270 in = cbuf;
271 cout = cbuf;
272 fout = fbuf;
273
274 while (l--) {
275 c = *in;
276 in++;
277
278 switch (ch->ch_pscan_state) {
279 default:
280 /* reset to sanity and fall through */
281 ch->ch_pscan_state = 0;
282
283 case 0:
284 /* No FF seen yet */
285 if (c == 0xff) /* delete this character from stream */
286 ch->ch_pscan_state = 1;
287 else {
288 *cout++ = c;
289 *fout++ = TTY_NORMAL;
290 count += 1;
291 }
292 break;
293
294 case 1:
295 /* first FF seen */
296 if (c == 0xff) {
297 /* doubled ff, transform to single ff */
298 *cout++ = c;
299 *fout++ = TTY_NORMAL;
300 count += 1;
301 ch->ch_pscan_state = 0;
302 } else {
303 /* save value for examination in next state */
304 ch->ch_pscan_savechar = c;
305 ch->ch_pscan_state = 2;
306 }
307 break;
308
309 case 2:
310 /* third character of ff sequence */
311 *cout++ = c;
312 if (DOS) {
313 if (ch->ch_pscan_savechar & 0x10)
314 *fout++ = TTY_BREAK;
315 else if (ch->ch_pscan_savechar & 0x08)
316 *fout++ = TTY_FRAME;
317 else
318 /*
319 * either marked as a parity error,
320 * indeterminate, or not in DOSMODE
321 * call it a parity error
322 */
323 *fout++ = TTY_PARITY;
324 } else {
325 /* case FF XX ?? where XX is not 00 */
326 if (ch->ch_pscan_savechar & 0xff) {
327 /* this should not happen */
328 pr_info("%s: parity_scan: error unexpected byte\n",
329 __func__);
330 *fout++ = TTY_PARITY;
331 }
332 /* case FF 00 XX where XX is not 00 */
333 else if (c == 0xff)
334 *fout++ = TTY_PARITY;
335 /* case FF 00 00 */
336 else
337 *fout++ = TTY_BREAK;
338
339 }
340 count += 1;
341 ch->ch_pscan_state = 0;
342 }
343 }
344 *len = count;
345 }
346
347
348 /**
349 * dgrp_net_idle() -- Idle the network connection
350 * @nd: pointer to node structure to idle
351 */
352 static void dgrp_net_idle(struct nd_struct *nd)
353 {
354 struct ch_struct *ch;
355 int i;
356
357 nd->nd_tx_work = 1;
358
359 nd->nd_state = NS_IDLE;
360 nd->nd_flag = 0;
361
362 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
363 if (nd->nd_seq_wait[i] != 0) {
364 nd->nd_seq_wait[i] = 0;
365 wake_up_interruptible(&nd->nd_seq_wque[i]);
366 }
367
368 if (i == nd->nd_seq_in)
369 break;
370 }
371
372 nd->nd_seq_out = nd->nd_seq_in;
373
374 nd->nd_unack = 0;
375 nd->nd_remain = 0;
376
377 nd->nd_tx_module = 0x10;
378 nd->nd_rx_module = 0x00;
379
380 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
381 ch->ch_state = CS_IDLE;
382
383 ch->ch_otype = 0;
384 ch->ch_otype_waiting = 0;
385 }
386 }
387
388 /*
389 * Increase the number of channels, waking up any
390 * threads that might be waiting for the channels
391 * to appear.
392 */
393 static void increase_channel_count(struct nd_struct *nd, int n)
394 {
395 struct ch_struct *ch;
396 struct device *classp;
397 char name[DEVICE_NAME_SIZE];
398 int ret;
399 u8 *buf;
400 int i;
401
402 for (i = nd->nd_chan_count; i < n; ++i) {
403 ch = nd->nd_chan + i;
404
405 /* FIXME: return a useful error instead! */
406 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
407 if (!buf)
408 return;
409
410 if (ch->ch_tbuf)
411 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
412 __func__);
413
414 ch->ch_tbuf = buf;
415
416 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
417 if (!buf)
418 return;
419
420 if (ch->ch_rbuf)
421 pr_info("%s - ch_rbuf was not NULL\n",
422 __func__);
423 ch->ch_rbuf = buf;
424
425 classp = tty_port_register_device(&ch->port,
426 nd->nd_serial_ttdriver, i,
427 NULL);
428
429 ch->ch_tun.un_sysfs = classp;
430 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
431
432 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
433 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
434 &classp->kobj, name);
435
436 /* NOTE: We don't support "cu" devices anymore,
437 * so you will notice we don't register them
438 * here anymore. */
439 if (dgrp_register_prdevices) {
440 classp = tty_register_device(nd->nd_xprint_ttdriver,
441 i, NULL);
442 ch->ch_pun.un_sysfs = classp;
443 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
444
445 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
446 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
447 &classp->kobj, name);
448 }
449
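/*
 * Publish the new channel count only after the channel is fully
 * set up, then wake any thread waiting for this port to appear.
 */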
450 nd->nd_chan_count = i + 1;
451 wake_up_interruptible(&ch->ch_flag_wait);
452 }
453 }
454
455 /*
456 * Decrease the number of channels, and wake up any threads that might
457 * be waiting on the channels that vanished.
458 */
459 static void decrease_channel_count(struct nd_struct *nd, int n)
460 {
461 struct ch_struct *ch;
462 char name[DEVICE_NAME_SIZE];
463 int i;
464
465 for (i = nd->nd_chan_count - 1; i >= n; --i) {
466 ch = nd->nd_chan + i;
467
468 /*
469 * Make any open ports inoperative.
470 */
471 ch->ch_state = CS_IDLE;
472
473 ch->ch_otype = 0;
474 ch->ch_otype_waiting = 0;
475
476 /*
477 * Only "HANGUP" if we care about carrier
478 * transitions and we are already open.
479 */
480 if (ch->ch_open_count != 0) {
481 ch->ch_flag |= CH_HANGUP;
482 dgrp_carrier(ch);
483 }
484
485 /*
486 * Unlike the CH_HANGUP flag above, use another
487 * flag to indicate to the RealPort state machine
488 * that this port has disappeared.
489 */
490 if (ch->ch_open_count != 0)
491 ch->ch_flag |= CH_PORT_GONE;
492
493 wake_up_interruptible(&ch->ch_flag_wait);
494
495 nd->nd_chan_count = i;
496
497 kfree(ch->ch_tbuf);
498 ch->ch_tbuf = NULL;
499
500 kfree(ch->ch_rbuf);
501 ch->ch_rbuf = NULL;
502
503 nd->nd_chan_count = i;
504
505 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
506 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
507 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
508 tty_unregister_device(nd->nd_serial_ttdriver, i);
509
510 /*
511 * NOTE: We don't support "cu" devices anymore, so don't
512 * unregister them here anymore.
513 */
514
515 if (dgrp_register_prdevices) {
516 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
517 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
518 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
519 tty_unregister_device(nd->nd_xprint_ttdriver, i);
520 }
521 }
522 }
523
524 /**
525 * dgrp_chan_count() -- Adjust the node channel count.
526 * @nd: pointer to a node structure
527 * @n: new value for channel count
528 *
529 * Adjusts the node channel count. If new ports have appeared, it tries
530 * to signal those processes that might have been waiting for ports to
531 * appear. If ports have disappeared it tries to signal those processes
532 * that might be hung waiting for a response for the now non-existent port.
533 */
534 static void dgrp_chan_count(struct nd_struct *nd, int n)
535 {
536 if (n == nd->nd_chan_count)
537 return;
538
539 if (n > nd->nd_chan_count)
540 increase_channel_count(nd, n);
541
542 if (n < nd->nd_chan_count)
543 decrease_channel_count(nd, n);
544 }
545
546 /**
547 * dgrp_monitor() -- send data to the device monitor queue
548 * @nd: pointer to a node structure
549 * @buf: data to copy to the monitoring buffer
550 * @len: number of bytes to transfer to the buffer
551 *
552 * Called by the net device routines to send data to the device
553 * monitor queue. If the device monitor buffer is too full to
554 * accept the data, it waits until the buffer is ready.
555 */
556 static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
557 {
558 int n;
559 int r;
560 int rtn;
561
562 /*
563 * Grab monitor lock.
564 */
565 down(&nd->nd_mon_semaphore);
566
567 /*
568 * Loop while data remains.
569 */
570 while ((len > 0) && (nd->nd_mon_buf)) {
571 /*
572 * Determine the amount of available space left in the
573 * buffer. If there's none, wait until some appears.
574 */
575
576 n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;
577
578 if (!n) {
579 nd->nd_mon_flag |= MON_WAIT_SPACE;
580
581 up(&nd->nd_mon_semaphore);
582
583 /*
584 * Go to sleep waiting until the condition becomes true.
585 */
586 rtn = wait_event_interruptible(nd->nd_mon_wqueue,
587 ((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));
588
589 /* FIXME: really ignore rtn? */
590
591 /*
592 * We can't exit here if we receive a signal, since
593 * to do so would trash the debug stream.
594 */
595
596 down(&nd->nd_mon_semaphore);
597
598 continue;
599 }
600
601 /*
602 * Copy as much data as will fit.
603 */
604
605 if (n > len)
606 n = len;
607
608 r = MON_MAX - nd->nd_mon_in;
609
610 if (r <= n) {
611 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);
612
613 n -= r;
614
615 nd->nd_mon_in = 0;
616
617 buf += r;
618 len -= r;
619 }
620
621 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);
622
623 nd->nd_mon_in += n;
624
625 buf += n;
626 len -= n;
627
628 if (nd->nd_mon_in >= MON_MAX)
629 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
630 __func__, nd->nd_mon_in);
631
632 /*
633 * Wakeup any thread waiting for data
634 */
635
636 if (nd->nd_mon_flag & MON_WAIT_DATA) {
637 nd->nd_mon_flag &= ~MON_WAIT_DATA;
638 wake_up_interruptible(&nd->nd_mon_wqueue);
639 }
640 }
641
642 /*
643 * Release the monitor lock.
644 */
645 up(&nd->nd_mon_semaphore);
646 }
647
648 /**
649 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
650 * @nd: pointer to a node structure
651 * @buf: destination buffer
652 *
653 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
654 * open.
655 */
656 static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
657 {
658 ulong t;
659
660 /*
661 * Convert time in HZ since open to time in milliseconds
662 * since open.
663 */
664 t = jiffies - nd->nd_mon_lbolt;
665 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
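/*
 * The multiply by 1000 is applied to the quotient and remainder
 * separately so 1000 * t does not overflow for large jiffie counts;
 * e.g. with HZ == 250, t == 1300 jiffies encodes as
 * 1000 * 5 + 1000 * 50 / 250 == 5200 ms.
 */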
666
667 put_unaligned_be32((uint)(t & 0xffffffff), buf);
668 }
669
670
671
672 /**
673 * dgrp_monitor_message() -- Builds a rpdump style message.
674 * @nd: pointer to a node structure
675 * @message: destination buffer
676 */
677 static void dgrp_monitor_message(struct nd_struct *nd, char *message)
678 {
679 u8 header[7];
680 int n;
681
682 header[0] = RPDUMP_MESSAGE;
683
684 dgrp_encode_time(nd, header + 1);
685
686 n = strlen(message);
687
688 put_unaligned_be16(n, header + 5);
689
690 dgrp_monitor(nd, header, sizeof(header));
691 dgrp_monitor(nd, (u8 *) message, n);
692 }
693
694
695
696 /**
697 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
698 * @nd: pointer to a node structure
699 */
700 static void dgrp_monitor_reset(struct nd_struct *nd)
701 {
702 u8 header[5];
703
704 header[0] = RPDUMP_RESET;
705
706 dgrp_encode_time(nd, header + 1);
707
708 dgrp_monitor(nd, header, sizeof(header));
709 }
710
711 /**
712 * dgrp_monitor_data() -- builds a monitor data packet
713 * @nd: pointer to a node structure
714 * @type: type of message to be logged
715 * @buf: data to be logged
716 * @size: number of bytes in the buffer
717 */
718 static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
719 {
720 u8 header[7];
721
722 header[0] = type;
723
724 dgrp_encode_time(nd, header + 1);
725
726 put_unaligned_be16(size, header + 5);
727
728 dgrp_monitor(nd, header, sizeof(header));
729 dgrp_monitor(nd, buf, size);
730 }
731
732 static int alloc_nd_buffers(struct nd_struct *nd)
733 {
734
735 nd->nd_iobuf = NULL;
736 nd->nd_writebuf = NULL;
737 nd->nd_inputbuf = NULL;
738 nd->nd_inputflagbuf = NULL;
739
740 /*
741 * Allocate the network read/write buffer.
742 */
743 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
744 if (!nd->nd_iobuf)
745 goto out_err;
746
747 /*
748 * Allocate a buffer for doing the copy from user space to
749 * kernel space in the write routines.
750 */
751 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
752 if (!nd->nd_writebuf)
753 goto out_err;
754
755 /*
756 * Allocate a buffer for doing the copy from kernel space to
757 * tty buffer space in the read routines.
758 */
759 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
760 if (!nd->nd_inputbuf)
761 goto out_err;
762
763 /*
764 * Allocate a buffer for doing the copy from kernel space to
765 * tty buffer space in the read routines.
766 */
767 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
768 if (!nd->nd_inputflagbuf)
769 goto out_err;
770
771 return 0;
772
773 out_err:
774 kfree(nd->nd_iobuf);
775 kfree(nd->nd_writebuf);
776 kfree(nd->nd_inputbuf);
777 kfree(nd->nd_inputflagbuf);
778 return -ENOMEM;
779 }
780
781 /*
782 * dgrp_net_open() -- Open the NET device for a particular PortServer
783 */
784 static int dgrp_net_open(struct inode *inode, struct file *file)
785 {
786 struct nd_struct *nd;
787 ulong lock_flags;
788 int rtn;
789
790 rtn = try_module_get(THIS_MODULE);
791 if (!rtn)
792 return -EAGAIN;
793
794 if (!capable(CAP_SYS_ADMIN)) {
795 rtn = -EPERM;
796 goto done;
797 }
798
799 /*
800 * Make sure that the "private_data" field hasn't already been used.
801 */
802 if (file->private_data) {
803 rtn = -EINVAL;
804 goto done;
805 }
806
807 /*
808 * Get the node pointer, and fail if it doesn't exist.
809 */
810 nd = PDE_DATA(inode);
811 if (!nd) {
812 rtn = -ENXIO;
813 goto done;
814 }
815
816 file->private_data = (void *) nd;
817
818 /*
819 * Grab the NET lock.
820 */
821 down(&nd->nd_net_semaphore);
822
823 if (nd->nd_state != NS_CLOSED) {
824 rtn = -EBUSY;
825 goto unlock;
826 }
827
828 /*
829 * Initialize the link speed parameters.
830 */
831
832 nd->nd_link.lk_fast_rate = UIO_MAX;
833 nd->nd_link.lk_slow_rate = UIO_MAX;
834
835 nd->nd_link.lk_fast_delay = 1000;
836 nd->nd_link.lk_slow_delay = 1000;
837
838 nd->nd_link.lk_header_size = 46;
839
840
841 rtn = alloc_nd_buffers(nd);
842 if (rtn)
843 goto unlock;
844
845 /*
846 * The port is now open, so move it to the IDLE state
847 */
848 dgrp_net_idle(nd);
849
850 nd->nd_tx_time = jiffies;
851
852 /*
853 * If the polling routine is not running, start it running here
854 */
855 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
856
857 if (!dgrp_poll_data.node_active_count) {
858 dgrp_poll_data.node_active_count = 2;
859 dgrp_poll_data.timer.expires = jiffies +
860 dgrp_poll_tick * HZ / 1000;
861 add_timer(&dgrp_poll_data.timer);
862 }
863
864 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
865
866 dgrp_monitor_message(nd, "Net Open");
867
868 unlock:
869 /*
870 * Release the NET lock.
871 */
872 up(&nd->nd_net_semaphore);
873
874 done:
875 if (rtn)
876 module_put(THIS_MODULE);
877
878 return rtn;
879 }
880
881 /* dgrp_net_release() -- close the NET device for a particular PortServer */
882 static int dgrp_net_release(struct inode *inode, struct file *file)
883 {
884 struct nd_struct *nd;
885 ulong lock_flags;
886
887 nd = (struct nd_struct *)(file->private_data);
888 if (!nd)
889 goto done;
890
891 /* TODO : historical locking placeholder */
892 /*
893 * In the HPUX version of the RealPort driver (which served as a basis
894 * for this driver) this locking code was used. Saved if ever we need
895 * to review the locking under Linux.
896 */
897 /* spinlock(&nd->nd_lock); */
898
899
900 /*
901 * Grab the NET lock.
902 */
903 down(&nd->nd_net_semaphore);
904
905 /*
906 * Before "closing" the internal connection, make sure all
907 * ports are "idle".
908 */
909 dgrp_net_idle(nd);
910
911 nd->nd_state = NS_CLOSED;
912 nd->nd_flag = 0;
913
914 /*
915 * TODO ... must the wait queue be reset on close?
916 * should any pending waiters be reset?
917 * Let's decide to assert that the waitq is empty... and see
918 * how soon we break.
919 */
920 if (waitqueue_active(&nd->nd_tx_waitq))
921 pr_info("%s - expected waitqueue_active to be false\n",
922 __func__);
923
924 nd->nd_send = 0;
925
926 kfree(nd->nd_iobuf);
927 nd->nd_iobuf = NULL;
928
929 /* TODO : historical locking placeholder */
930 /*
931 * In the HPUX version of the RealPort driver (which served as a basis
932 * for this driver) this locking code was used. Saved if ever we need
933 * to review the locking under Linux.
934 */
935 /* spinunlock( &nd->nd_lock ); */
936
937
938 kfree(nd->nd_writebuf);
939 nd->nd_writebuf = NULL;
940
941 kfree(nd->nd_inputbuf);
942 nd->nd_inputbuf = NULL;
943
944 kfree(nd->nd_inputflagbuf);
945 nd->nd_inputflagbuf = NULL;
946
947 /* TODO : historical locking placeholder */
948 /*
949 * In the HPUX version of the RealPort driver (which served as a basis
950 * for this driver) this locking code was used. Saved if ever we need
951 * to review the locking under Linux.
952 */
953 /* spinlock(&nd->nd_lock); */
954
955 /*
956 * Set the active port count to zero.
957 */
958 dgrp_chan_count(nd, 0);
959
960 /* TODO : historical locking placeholder */
961 /*
962 * In the HPUX version of the RealPort driver (which served as a basis
963 * for this driver) this locking code was used. Saved if ever we need
964 * to review the locking under Linux.
965 */
966 /* spinunlock(&nd->nd_lock); */
967
968 /*
969 * Release the NET lock.
970 */
971 up(&nd->nd_net_semaphore);
972
973 /*
974 * Cause the poller to stop scheduling itself if this is
975 * the last active node.
976 */
977 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
978
979 if (dgrp_poll_data.node_active_count == 2) {
980 del_timer(&dgrp_poll_data.timer);
981 dgrp_poll_data.node_active_count = 0;
982 }
983
984 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
985
986 down(&nd->nd_net_semaphore);
987
988 dgrp_monitor_message(nd, "Net Close");
989
990 up(&nd->nd_net_semaphore);
991
992 done:
993 module_put(THIS_MODULE);
994 file->private_data = NULL;
995 return 0;
996 }
997
998 /* used in dgrp_send to set up a command header */
999 static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1000 {
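/*
 * 0xb0-0xbf introduces a command packet for ports 0-15 of the
 * currently selected module; the command code follows in the
 * next byte.
 */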
1001 *b++ = 0xb0 + (port & 0x0f);
1002 *b++ = cmd;
1003 return b;
1004 }
1005
1006 /**
1007 * dgrp_send() -- build a packet for transmission to the server
1008 * @nd: pointer to a node structure
1009 * @tmax: maximum bytes to transmit
1010 *
1011 * returns number of bytes sent
1012 */
1013 static int dgrp_send(struct nd_struct *nd, long tmax)
1014 {
1015 struct ch_struct *ch = nd->nd_chan;
1016 u8 *b;
1017 u8 *buf;
1018 u8 *mbuf;
1019 u8 port;
1020 int mod;
1021 long send;
1022 int maxport;
1023 long lastport = -1;
1024 ushort rwin;
1025 long in;
1026 ushort n;
1027 long t;
1028 long ttotal;
1029 long tchan;
1030 long tsend;
1031 ushort tsafe;
1032 long work;
1033 long send_sync;
1034 long wanted_sync_port = -1;
1035 ushort tdata[CHAN_MAX];
1036 long used_buffer;
1037
1038 mbuf = nd->nd_iobuf + UIO_BASE;
1039 buf = b = mbuf;
1040
1041 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1042
1043 ttotal = 0;
1044 tchan = 0;
1045
1046 memset(tdata, 0, sizeof(tdata));
1047
1048
1049 /*
1050 * If there are any outstanding requests to be serviced,
1051 * service them here.
1052 */
1053 if (nd->nd_send & NR_PASSWORD) {
1054
1055 /*
1056 * Send Password response.
1057 */
1058
1059 b[0] = 0xfc;
1060 b[1] = 0x20;
1061 put_unaligned_be16(strlen(nd->password), b + 2);
1062 b += 4;
1063 b += strlen(nd->password);
1064 nd->nd_send &= ~(NR_PASSWORD);
1065 }
1066
1067
1068 /*
1069 * Loop over all modules to generate commands, and determine
1070 * the amount of data queued for transmit.
1071 */
1072
1073 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1074 /*
1075 * If this is not the current module, enter a module select
1076 * code in the buffer.
1077 */
1078
1079 if (mod != nd->nd_tx_module)
1080 mbuf = ++b;
1081
1082 /*
1083 * Loop to process one module.
1084 */
1085
1086 maxport = port + 16;
1087
1088 if (maxport > nd->nd_chan_count)
1089 maxport = nd->nd_chan_count;
1090
1091 for (; port < maxport; port++, ch++) {
1092 /*
1093 * Switch based on channel state.
1094 */
1095
1096 switch (ch->ch_state) {
1097 /*
1098 * Send requests when the port is closed, and there
1099 * are no Open, Close or Cancel requests expected.
1100 */
1101
1102 case CS_IDLE:
1103 /*
1104 * Wait until any open error code
1105 * has been delivered to all
1106 * associated ports.
1107 */
1108
1109 if (ch->ch_open_error) {
1110 if (ch->ch_wait_count[ch->ch_otype]) {
1111 work = 1;
1112 break;
1113 }
1114
1115 ch->ch_open_error = 0;
1116 }
1117
1118 /*
1119 * Wait until the channel HANGUP flag is reset
1120 * before sending the first open. We can only
1121 * get to this state after a server disconnect.
1122 */
1123
1124 if ((ch->ch_flag & CH_HANGUP) != 0)
1125 break;
1126
1127 /*
1128 * If recovering from a TCP disconnect, or if
1129 * there is an immediate open pending, send an
1130 * Immediate Open request.
1131 */
1132 if ((ch->ch_flag & CH_PORT_GONE) ||
1133 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1134 b = set_cmd_header(b, port, 10);
1135 *b++ = 0;
1136
1137 ch->ch_state = CS_WAIT_OPEN;
1138 ch->ch_otype = OTYPE_IMMEDIATE;
1139 break;
1140 }
1141
1142 /*
1143 * If there is no Persistent or Incoming Open on the wait
1144 * list in the server, and a thread is waiting for a
1145 * Persistent or Incoming Open, send a Persistent or Incoming
1146 * Open Request.
1147 */
1148 if (ch->ch_otype_waiting == 0) {
1149 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1150 b = set_cmd_header(b, port, 10);
1151 *b++ = 1;
1152
1153 ch->ch_state = CS_WAIT_OPEN;
1154 ch->ch_otype = OTYPE_PERSISTENT;
1155 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1156 b = set_cmd_header(b, port, 10);
1157 *b++ = 2;
1158
1159 ch->ch_state = CS_WAIT_OPEN;
1160 ch->ch_otype = OTYPE_INCOMING;
1161 }
1162 break;
1163 }
1164
1165 /*
1166 * If a Persistent or Incoming Open is pending in
1167 * the server, but there is no longer an open
1168 * thread waiting for it, cancel the request.
1169 */
1170
1171 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1172 b = set_cmd_header(b, port, 10);
1173 *b++ = 4;
1174
1175 ch->ch_state = CS_WAIT_CANCEL;
1176 ch->ch_otype = ch->ch_otype_waiting;
1177 }
1178 break;
1179
1180 /*
1181 * Send port parameter queries.
1182 */
1183 case CS_SEND_QUERY:
1184 /*
1185 * Clear out all FEP state that might remain
1186 * from the last connection.
1187 */
1188
1189 ch->ch_flag |= CH_PARAM;
1190
1191 ch->ch_flag &= ~CH_RX_FLUSH;
1192
1193 ch->ch_expect = 0;
1194
1195 ch->ch_s_tin = 0;
1196 ch->ch_s_tpos = 0;
1197 ch->ch_s_tsize = 0;
1198 ch->ch_s_treq = 0;
1199 ch->ch_s_elast = 0;
1200
1201 ch->ch_s_rin = 0;
1202 ch->ch_s_rwin = 0;
1203 ch->ch_s_rsize = 0;
1204
1205 ch->ch_s_tmax = 0;
1206 ch->ch_s_ttime = 0;
1207 ch->ch_s_rmax = 0;
1208 ch->ch_s_rtime = 0;
1209 ch->ch_s_rlow = 0;
1210 ch->ch_s_rhigh = 0;
1211
1212 ch->ch_s_brate = 0;
1213 ch->ch_s_iflag = 0;
1214 ch->ch_s_cflag = 0;
1215 ch->ch_s_oflag = 0;
1216 ch->ch_s_xflag = 0;
1217
1218 ch->ch_s_mout = 0;
1219 ch->ch_s_mflow = 0;
1220 ch->ch_s_mctrl = 0;
1221 ch->ch_s_xon = 0;
1222 ch->ch_s_xoff = 0;
1223 ch->ch_s_lnext = 0;
1224 ch->ch_s_xxon = 0;
1225 ch->ch_s_xxoff = 0;
1226
1227 /* Send Sequence Request */
1228 b = set_cmd_header(b, port, 14);
1229
1230 /* Configure Event Conditions Packet */
1231 b = set_cmd_header(b, port, 42);
1232 put_unaligned_be16(0x02c0, b);
1233 b += 2;
1234 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1235 DM_DSR | DM_RI | DM_CD);
1236
1237 /* Send Status Request */
1238 b = set_cmd_header(b, port, 16);
1239
1240 /* Send Buffer Request */
1241 b = set_cmd_header(b, port, 20);
1242
1243 /* Send Port Capability Request */
1244 b = set_cmd_header(b, port, 22);
1245
1246 ch->ch_expect = (RR_SEQUENCE |
1247 RR_STATUS |
1248 RR_BUFFER |
1249 RR_CAPABILITY);
1250
1251 ch->ch_state = CS_WAIT_QUERY;
1252
1253 /* Raise modem signals */
1254 b = set_cmd_header(b, port, 44);
1255
1256 if (ch->ch_flag & CH_PORT_GONE)
1257 ch->ch_s_mout = ch->ch_mout;
1258 else
1259 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1260
1261 *b++ = ch->ch_mout;
1262 *b++ = ch->ch_s_mflow = 0;
1263 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1264
1265 if (ch->ch_flag & CH_PORT_GONE)
1266 ch->ch_flag &= ~CH_PORT_GONE;
1267
1268 break;
1269
1270 /*
1271 * Handle normal open and ready mode.
1272 */
1273
1274 case CS_READY:
1275
1276 /*
1277 * If the port is not open, and there are no
1278 * longer any ports requesting an open,
1279 * then close the port.
1280 */
1281
1282 if (ch->ch_open_count == 0 &&
1283 ch->ch_wait_count[ch->ch_otype] == 0) {
1284 goto send_close;
1285 }
1286
1287 /*
1288 * Process waiting input.
1289 *
1290 * If there is no one to read it, discard the data.
1291 *
1292 * Otherwise if we are not in fastcook mode, or if there is a
1293 * fastcook thread waiting for data, send the data to the
1294 * line discipline.
1295 */
1296 if (ch->ch_rin != ch->ch_rout) {
1297 if (ch->ch_tun.un_open_count == 0 ||
1298 (ch->ch_tun.un_flag & UN_CLOSING) ||
1299 (ch->ch_cflag & CF_CREAD) == 0) {
1300 ch->ch_rout = ch->ch_rin;
1301 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1302 ch->ch_inwait != 0) {
1303 dgrp_input(ch);
1304
1305 if (ch->ch_rin != ch->ch_rout)
1306 work = 1;
1307 }
1308 }
1309
1310 /*
1311 * Handle receive flush, and changes to
1312 * server port parameters.
1313 */
1314
1315 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1316 /*
1317 * If we are in receive flush mode,
1318 * and enough data has gone by, reset
1319 * receive flush mode.
1320 */
1321 if (ch->ch_flag & CH_RX_FLUSH) {
1322 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1323 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1324 ch->ch_flag &= ~CH_RX_FLUSH;
1325 else
1326 work = 1;
1327 }
1328
1329 /*
1330 * Send TMAX, TTIME.
1331 */
1332
1333 if (ch->ch_s_tmax != ch->ch_tmax ||
1334 ch->ch_s_ttime != ch->ch_ttime) {
1335 b = set_cmd_header(b, port, 48);
1336
1337 ch->ch_s_tmax = ch->ch_tmax;
1338 ch->ch_s_ttime = ch->ch_ttime;
1339
1340 put_unaligned_be16(ch->ch_s_tmax,
1341 b);
1342 b += 2;
1343
1344 put_unaligned_be16(ch->ch_s_ttime,
1345 b);
1346 b += 2;
1347 }
1348
1349 /*
1350 * Send RLOW, RHIGH.
1351 */
1352
1353 if (ch->ch_s_rlow != ch->ch_rlow ||
1354 ch->ch_s_rhigh != ch->ch_rhigh) {
1355 b = set_cmd_header(b, port, 45);
1356
1357 ch->ch_s_rlow = ch->ch_rlow;
1358 ch->ch_s_rhigh = ch->ch_rhigh;
1359
1360 put_unaligned_be16(ch->ch_s_rlow,
1361 b);
1362 b += 2;
1363
1364 put_unaligned_be16(ch->ch_s_rhigh,
1365 b);
1366 b += 2;
1367 }
1368
1369 /*
1370 * Send BRATE, CFLAG, IFLAG,
1371 * OFLAG, XFLAG.
1372 */
1373
1374 if (ch->ch_s_brate != ch->ch_brate ||
1375 ch->ch_s_cflag != ch->ch_cflag ||
1376 ch->ch_s_iflag != ch->ch_iflag ||
1377 ch->ch_s_oflag != ch->ch_oflag ||
1378 ch->ch_s_xflag != ch->ch_xflag) {
1379 b = set_cmd_header(b, port, 40);
1380
1381 ch->ch_s_brate = ch->ch_brate;
1382 ch->ch_s_cflag = ch->ch_cflag;
1383 ch->ch_s_iflag = ch->ch_iflag;
1384 ch->ch_s_oflag = ch->ch_oflag;
1385 ch->ch_s_xflag = ch->ch_xflag;
1386
1387 put_unaligned_be16(ch->ch_s_brate,
1388 b);
1389 b += 2;
1390
1391 put_unaligned_be16(ch->ch_s_cflag,
1392 b);
1393 b += 2;
1394
1395 put_unaligned_be16(ch->ch_s_iflag,
1396 b);
1397 b += 2;
1398
1399 put_unaligned_be16(ch->ch_s_oflag,
1400 b);
1401 b += 2;
1402
1403 put_unaligned_be16(ch->ch_s_xflag,
1404 b);
1405 b += 2;
1406 }
1407
1408 /*
1409 * Send MOUT, MFLOW, MCTRL.
1410 */
1411
1412 if (ch->ch_s_mout != ch->ch_mout ||
1413 ch->ch_s_mflow != ch->ch_mflow ||
1414 ch->ch_s_mctrl != ch->ch_mctrl) {
1415 b = set_cmd_header(b, port, 44);
1416
1417 *b++ = ch->ch_s_mout = ch->ch_mout;
1418 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1419 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1420 }
1421
1422 /*
1423 * Send Flow control characters.
1424 */
1425
1426 if (ch->ch_s_xon != ch->ch_xon ||
1427 ch->ch_s_xoff != ch->ch_xoff ||
1428 ch->ch_s_lnext != ch->ch_lnext ||
1429 ch->ch_s_xxon != ch->ch_xxon ||
1430 ch->ch_s_xxoff != ch->ch_xxoff) {
1431 b = set_cmd_header(b, port, 46);
1432
1433 *b++ = ch->ch_s_xon = ch->ch_xon;
1434 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1435 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1436 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1437 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1438 }
1439
1440 /*
1441 * Send RMAX, RTIME.
1442 */
1443
1444 if (ch->ch_s_rmax != ch->ch_rmax ||
1445 ch->ch_s_rtime != ch->ch_rtime) {
1446 b = set_cmd_header(b, port, 47);
1447
1448 ch->ch_s_rmax = ch->ch_rmax;
1449 ch->ch_s_rtime = ch->ch_rtime;
1450
1451 put_unaligned_be16(ch->ch_s_rmax,
1452 b);
1453 b += 2;
1454
1455 put_unaligned_be16(ch->ch_s_rtime,
1456 b);
1457 b += 2;
1458 }
1459
1460 ch->ch_flag &= ~CH_PARAM;
1461 wake_up_interruptible(&ch->ch_flag_wait);
1462 }
1463
1464
1465 /*
1466 * Handle action commands.
1467 */
1468
1469 if (ch->ch_send != 0) {
1470 /* int send = ch->ch_send & ~ch->ch_expect; */
1471 send = ch->ch_send & ~ch->ch_expect;
1472
1473 /* Send character immediate */
1474 if ((send & RR_TX_ICHAR) != 0) {
1475 b = set_cmd_header(b, port, 60);
1476
1477 *b++ = ch->ch_xon;
1478 ch->ch_expect |= RR_TX_ICHAR;
1479 }
1480
1481 /* BREAK request */
1482 if ((send & RR_TX_BREAK) != 0) {
1483 if (ch->ch_break_time != 0) {
1484 b = set_cmd_header(b, port, 61);
1485 put_unaligned_be16(ch->ch_break_time,
1486 b);
1487 b += 2;
1488
1489 ch->ch_expect |= RR_TX_BREAK;
1490 ch->ch_break_time = 0;
1491 } else {
1492 ch->ch_send &= ~RR_TX_BREAK;
1493 ch->ch_flag &= ~CH_TX_BREAK;
1494 wake_up_interruptible(&ch->ch_flag_wait);
1495 }
1496 }
1497
1498 /*
1499 * Flush input/output buffers.
1500 */
1501
1502 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1503 b = set_cmd_header(b, port, 62);
1504
1505 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1506 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1507
1508 if (send & RR_RX_FLUSH) {
1509 ch->ch_flush_seq = nd->nd_seq_in;
1510 ch->ch_flag |= CH_RX_FLUSH;
1511 work = 1;
1512 send_sync = 1;
1513 wanted_sync_port = port;
1514 }
1515
1516 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1517 }
1518
1519 /* Pause input/output */
1520 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1521 b = set_cmd_header(b, port, 63);
1522 *b = 0;
1523
1524 if ((send & RR_TX_STOP) != 0)
1525 *b |= EV_OPU;
1526
1527 if ((send & RR_RX_STOP) != 0)
1528 *b |= EV_IPU;
1529
1530 b++;
1531
1532 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1533 }
1534
1535 /* Start input/output */
1536 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1537 b = set_cmd_header(b, port, 64);
1538 *b = 0;
1539
1540 if ((send & RR_TX_START) != 0)
1541 *b |= EV_OPU | EV_OPS | EV_OPX;
1542
1543 if ((send & RR_RX_START) != 0)
1544 *b |= EV_IPU | EV_IPS;
1545
1546 b++;
1547
1548 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1549 }
1550 }
1551
1552
1553 /*
1554 * Send a window sequence to acknowledge received data.
1555 */
1556
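/*
 * The advertised window is the server receive sequence plus the free
 * space left in the local receive ring; only send an update once the
 * window has advanced by at least a quarter of the buffer.
 */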
1557 rwin = (ch->ch_s_rin +
1558 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1559
1560 n = (rwin - ch->ch_s_rwin) & 0xffff;
1561
1562 if (n >= RBUF_MAX / 4) {
1563 b[0] = 0xa0 + (port & 0xf);
1564 ch->ch_s_rwin = rwin;
1565 put_unaligned_be16(rwin, b + 1);
1566 b += 3;
1567 }
1568
1569 /*
1570 * If the terminal is waiting on LOW
1571 * water or EMPTY, and the condition
1572 * is now satisfied, call the line
1573 * discipline to put more data in the
1574 * buffer.
1575 */
1576
1577 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1578
1579 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1580 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1581 (n <= TBUF_LOW) :
1582 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1583 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1584
1585 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1586 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1587 tty_wakeup(ch->ch_tun.un_tty);
1588 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1589 }
1590 }
1591
1592 /*
1593 * If the printer is waiting on LOW
1594 * water, TIME, EMPTY or PWAIT, and is
1595 * now ready to put more data in the
1596 * buffer, call the line discipline to
1597 * do the job.
1598 */
1599
1600 /* FIXME: jiffies - ch->ch_waketime can never
1601 be < 0. Someone needs to work out what is
1602 actually intended here */
1603 if (ch->ch_pun.un_open_count &&
1604 (ch->ch_pun.un_flag &
1605 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1606
1607 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1608 (n <= TBUF_LOW) :
1609 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1610 ((jiffies - ch->ch_waketime) >= 0) :
1611 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1612 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1613 ((ch->ch_tun.un_open_count &&
1614 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1615 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1616 : 1
1617 )
1618 )) {
1619 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1620
1621 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1622 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1623 tty_wakeup(ch->ch_pun.un_tty);
1624 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1625
1626 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1627 work = 1;
1628 }
1629 }
1630
1631
1632 /*
1633 * Determine the max number of bytes
1634 * this port can send, including
1635 * packet header overhead.
1636 */
1637
1638 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1639
1640 if (n > t)
1641 n = t;
1642
1643 if (n != 0) {
1644 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1645
1646 tdata[tchan++] = n;
1647 ttotal += n;
1648 }
1649 break;
1650
1651 /*
1652 * Close the port.
1653 */
1654
1655 send_close:
1656 case CS_SEND_CLOSE:
1657 b = set_cmd_header(b, port, 10);
1658 if (ch->ch_otype == OTYPE_IMMEDIATE)
1659 *b++ = 3;
1660 else
1661 *b++ = 4;
1662
1663 ch->ch_state = CS_WAIT_CLOSE;
1664 break;
1665
1666 /*
1667 * Wait for a previous server request.
1668 */
1669
1670 case CS_WAIT_OPEN:
1671 case CS_WAIT_CANCEL:
1672 case CS_WAIT_FAIL:
1673 case CS_WAIT_QUERY:
1674 case CS_WAIT_CLOSE:
1675 break;
1676
1677 default:
1678 pr_info("%s - unexpected channel state (%i)\n",
1679 __func__, ch->ch_state);
1680 }
1681 }
1682
1683 /*
1684 * If a module select code is needed, drop one in. If space
1685 * was reserved for one, but none is needed, recover the space.
1686 */
1687
1688 if (mod != nd->nd_tx_module) {
1689 if (b != mbuf) {
1690 mbuf[-1] = 0xf0 | mod;
1691 nd->nd_tx_module = mod;
1692 } else {
1693 b--;
1694 }
1695 }
1696 }
1697
1698 /*
1699 * Adjust "tmax" so that under worst case conditions we do
1700 * not overflow either the daemon buffer or the internal
1701 * buffer in the loop that follows. Leave a safe area
1702 * of 64 bytes so we start getting asserts before we start
1703 * losing data or clobbering memory.
1704 */
1705
1706 n = UIO_MAX - UIO_BASE;
1707
1708 if (tmax > n)
1709 tmax = n;
1710
1711 tmax -= 64;
1712
1713 tsafe = tmax;
1714
1715 /*
1716 * Allocate space for 5 Module Selects, 1 Sequence Request,
1717 * and 1 Set TREQ for each active channel.
1718 */
1719
1720 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1721
1722 /*
1723 * Further reduce "tmax" to the available transmit credit.
1724 * Note that this is a soft constraint; the transmit credit
1725 * can go negative for a time and then recover.
1726 */
1727
1728 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1729
1730 if (tmax > n)
1731 tmax = n;
1732
1733 /*
1734 * Finally reduce tmax by the number of bytes already in
1735 * the buffer.
1736 */
1737
1738 tmax -= b - buf;
1739
1740 /*
1741 * Suspend data transmit unless every ready channel can send
1742 * at least 1 character.
1743 */
1744 if (tmax < 2 * nd->nd_chan_count) {
1745 tsend = 1;
1746
1747 } else if (tchan > 1 && ttotal > tmax) {
1748
1749 /*
1750 * If transmit is limited by the credit budget, find the
1751 * largest number of characters we can send without driving
1752 * the credit negative.
1753 */
1754
1755 long tm = tmax;
1756 int tc = tchan;
1757 int try;
1758
1759 tsend = tm / tc;
1760
1761 for (try = 0; try < 3; try++) {
1762 int i;
1763 int c = 0;
1764
1765 for (i = 0; i < tc; i++) {
1766 if (tsend < tdata[i])
1767 tdata[c++] = tdata[i];
1768 else
1769 tm -= tdata[i];
1770 }
1771
1772 if (c == tc)
1773 break;
1774
1775 tsend = tm / c;
1776
1777 if (c == 1)
1778 break;
1779
1780 tc = c;
1781 }
1782
1783 tsend = tm / nd->nd_chan_count;
1784
1785 if (tsend < 2)
1786 tsend = 1;
1787
1788 } else {
1789 /*
1790 * If there are no budgetary constraints, or only one channel is
1791 * ready to send, set the character limit to the remaining
1792 * buffer size.
1793 */
1794
1795 tsend = tmax;
1796 }
1797
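/*
 * Reserve room for the per-packet data header added when the data is
 * copied below: 1 byte for up to 8 data bytes, 2 bytes for up to 255,
 * otherwise 3 bytes.
 */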
1798 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
1799
1800 /*
1801 * Loop over all channels, sending queued data.
1802 */
1803
1804 port = 0;
1805 ch = nd->nd_chan;
1806 used_buffer = tmax;
1807
1808 for (mod = 0; port < nd->nd_chan_count; mod++) {
1809 /*
1810 * If this is not the current module, enter a module select
1811 * code in the buffer.
1812 */
1813
1814 if (mod != nd->nd_tx_module)
1815 mbuf = ++b;
1816
1817 /*
1818 * Loop to process one module.
1819 */
1820
1821 maxport = port + 16;
1822
1823 if (maxport > nd->nd_chan_count)
1824 maxport = nd->nd_chan_count;
1825
1826 for (; port < maxport; port++, ch++) {
1827 if (ch->ch_state != CS_READY)
1828 continue;
1829
1830 lastport = port;
1831
1832 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1833
1834 /*
1835 * If there is data that can be sent, send it.
1836 */
1837
1838 if (n != 0 && used_buffer > 0) {
1839 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1840
1841 if (n > t)
1842 n = t;
1843
1844 if (n > tsend) {
1845 work = 1;
1846 n = tsend;
1847 }
1848
1849 if (n > used_buffer) {
1850 work = 1;
1851 n = used_buffer;
1852 }
1853
1854 if (n <= 0)
1855 continue;
1856
1857 /*
1858 * Create the correct size transmit header,
1859 * depending on the amount of data to transmit.
1860 */
1861
1862 if (n <= 8) {
1863
1864 b[0] = ((n - 1) << 4) + (port & 0xf);
1865 b += 1;
1866
1867 } else if (n <= 255) {
1868
1869 b[0] = 0x80 + (port & 0xf);
1870 b[1] = n;
1871 b += 2;
1872
1873 } else {
1874
1875 b[0] = 0x90 + (port & 0xf);
1876 put_unaligned_be16(n, b + 1);
1877 b += 3;
1878 }
1879
1880 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1881
1882 /*
1883 * Copy transmit data to the packet.
1884 */
1885
1886 t = TBUF_MAX - ch->ch_tout;
1887
1888 if (n >= t) {
1889 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1890 b += t;
1891 n -= t;
1892 used_buffer -= t;
1893 ch->ch_tout = 0;
1894 }
1895
1896 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1897 b += n;
1898 used_buffer -= n;
1899 ch->ch_tout += n;
1900 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1901 }
1902
1903 /*
1904 * Wake any terminal unit process waiting in the
1905 * dgrp_write routine for low water.
1906 */
1907
1908 if (n > TBUF_LOW)
1909 continue;
1910
1911 if ((ch->ch_flag & CH_LOW) != 0) {
1912 ch->ch_flag &= ~CH_LOW;
1913 wake_up_interruptible(&ch->ch_flag_wait);
1914 }
1915
1916 /* selwakeup tty_sel */
1917 if (ch->ch_tun.un_open_count) {
1918 struct tty_struct *tty = (ch->ch_tun.un_tty);
1919
1920 if (waitqueue_active(&tty->write_wait))
1921 wake_up_interruptible(&tty->write_wait);
1922
1923 tty_wakeup(tty);
1924 }
1925
1926 if (ch->ch_pun.un_open_count) {
1927 struct tty_struct *tty = (ch->ch_pun.un_tty);
1928
1929 if (waitqueue_active(&tty->write_wait))
1930 wake_up_interruptible(&tty->write_wait);
1931
1932 tty_wakeup(tty);
1933 }
1934
1935 /*
1936 * Do EMPTY processing.
1937 */
1938
1939 if (n != 0)
1940 continue;
1941
1942 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
1943 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
1944 /*
1945 * If there is still data in the server, ask the server
1946 * to notify us when its all gone.
1947 */
1948
1949 if (ch->ch_s_treq != ch->ch_s_tin) {
1950 b = set_cmd_header(b, port, 43);
1951
1952 ch->ch_s_treq = ch->ch_s_tin;
1953 put_unaligned_be16(ch->ch_s_treq,
1954 b);
1955 b += 2;
1956 }
1957
1958 /*
1959 * If there is a thread waiting for buffer empty,
1960 * and we are truly empty, wake the thread.
1961 */
1962
1963 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
1964 (ch->ch_send & RR_TX_BREAK) == 0) {
1965 ch->ch_flag &= ~CH_EMPTY;
1966
1967 wake_up_interruptible(&ch->ch_flag_wait);
1968 }
1969 }
1970 }
1971
1972 /*
1973 * If a module select code is needed, drop one in. If space
1974 * was reserved for one, but none is needed, recover the space.
1975 */
1976
1977 if (mod != nd->nd_tx_module) {
1978 if (b != mbuf) {
1979 mbuf[-1] = 0xf0 | mod;
1980 nd->nd_tx_module = mod;
1981 } else {
1982 b--;
1983 }
1984 }
1985 }
1986
1987 /*
1988 * Send a synchronization sequence associated with the last open
1989 * channel that sent data, and remember the time when the data was
1990 * sent.
1991 */
1992
1993 in = nd->nd_seq_in;
1994
1995 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
1996 u8 *bb = b;
1997
1998 /*
1999 * Attempt to use the port that really wanted the sync.
2000 * This gets around a race condition where the "lastport" is in
2001 * the middle of the close() routine, and by the time we
2002 * send this command, it will have already acked the close, and
2003 * thus not send the sync response.
2004 */
2005 if (wanted_sync_port >= 0)
2006 lastport = wanted_sync_port;
2007 /*
2008 * Set a flag so that if the port is in the middle of a close,
2009 * it will not be permitted to actually close until we get a
2010 * sync response, and clear the flag there.
2011 */
2012 ch = nd->nd_chan + lastport;
2013 ch->ch_flag |= CH_WAITING_SYNC;
2014
2015 mod = lastport >> 4;
2016
2017 if (mod != nd->nd_tx_module) {
2018 bb[0] = 0xf0 + mod;
2019 bb += 1;
2020
2021 nd->nd_tx_module = mod;
2022 }
2023
2024 bb = set_cmd_header(bb, lastport, 12);
2025 *bb++ = in;
2026
2027 nd->nd_seq_size[in] = bb - buf;
2028 nd->nd_seq_time[in] = jiffies;
2029
2030 if (++in >= SEQ_MAX)
2031 in = 0;
2032
2033 if (in != nd->nd_seq_out) {
2034 b = bb;
2035 nd->nd_seq_in = in;
2036 nd->nd_unack += b - buf;
2037 }
2038 }
2039
2040 /*
2041 * If there are no open ports, a sync cannot be sent.
2042 * There is nothing left to wait for anyway, so wake any
2043 * thread waiting for an acknowledgement.
2044 */
2045
2046 else if (nd->nd_seq_wait[in] != 0) {
2047 nd->nd_seq_wait[in] = 0;
2048
2049 wake_up_interruptible(&nd->nd_seq_wque[in]);
2050 }
2051
2052 /*
2053 * If there is no traffic for an interval of IDLE_MAX, then
2054 * send a single byte packet.
2055 */
2056
2057 if (b != buf) {
2058 nd->nd_tx_time = jiffies;
2059 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2060 *b++ = 0xf0 | nd->nd_tx_module;
2061 nd->nd_tx_time = jiffies;
2062 }
2063
2064 n = b - buf;
2065
2066 if (n >= tsafe)
2067 pr_info("%s - n(%i) >= tsafe(%i)\n",
2068 __func__, n, tsafe);
2069
2070 if (tsend < 0)
2071 dgrp_dump(buf, n);
2072
2073 nd->nd_tx_work = work;
2074
2075 return n;
2076 }
2077
2078 /*
2079 * dgrp_net_read()
2080 * Data to be sent TO the PortServer from the "async." half of the driver.
2081 */
2082 static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2083 loff_t *ppos)
2084 {
2085 struct nd_struct *nd;
2086 long n;
2087 u8 *local_buf;
2088 u8 *b;
2089 ssize_t rtn;
2090
2091 /*
2092 * Get the node pointer, and quit if it doesn't exist.
2093 */
2094 nd = (struct nd_struct *)(file->private_data);
2095 if (!nd)
2096 return -ENXIO;
2097
2098 if (count < UIO_MIN)
2099 return -EINVAL;
2100
2101 /*
2102 * Only one read/write operation may be in progress at
2103 * any given time.
2104 */
2105
2106 /*
2107 * Grab the NET lock.
2108 */
2109 down(&nd->nd_net_semaphore);
2110
2111 nd->nd_read_count++;
2112
2113 nd->nd_tx_ready = 0;
2114
2115 /*
2116 * Determine the effective size of the buffer.
2117 */
2118
2119 if (nd->nd_remain > UIO_BASE)
2120 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2121 __func__, nd->nd_remain);
2122
2123 b = local_buf = nd->nd_iobuf + UIO_BASE;
2124
2125 /*
2126 * Generate data according to the node state.
2127 */
2128
2129 switch (nd->nd_state) {
2130 /*
2131 * Initialize the connection.
2132 */
2133
2134 case NS_IDLE:
2135 if (nd->nd_mon_buf)
2136 dgrp_monitor_reset(nd);
2137
2138 /*
2139 * Request a Product ID Packet.
2140 */
2141
2142 b[0] = 0xfb;
2143 b[1] = 0x01;
2144 b += 2;
2145
2146 nd->nd_expect |= NR_IDENT;
2147
2148 /*
2149 * Request a Server Capability ID Response.
2150 */
2151
2152 b[0] = 0xfb;
2153 b[1] = 0x02;
2154 b += 2;
2155
2156 nd->nd_expect |= NR_CAPABILITY;
2157
2158 /*
2159 * Request a Server VPD Response.
2160 */
2161
2162 b[0] = 0xfb;
2163 b[1] = 0x18;
2164 b += 2;
2165
2166 nd->nd_expect |= NR_VPD;
2167
2168 nd->nd_state = NS_WAIT_QUERY;
2169 break;
2170
2171 /*
2172 * We do serious communication with the server only in
2173 * the READY state.
2174 */
2175
2176 case NS_READY:
2177 b = dgrp_send(nd, count) + local_buf;
2178 break;
2179
2180 /*
2181 * Send off an error after receiving a bogus message
2182 * from the server.
2183 */
2184
2185 case NS_SEND_ERROR:
2186 n = strlen(nd->nd_error);
2187
2188 b[0] = 0xff;
2189 b[1] = n;
2190 memcpy(b + 2, nd->nd_error, n);
2191 b += 2 + n;
2192
2193 dgrp_net_idle(nd);
2194 /*
2195 * Set the active port count to zero.
2196 */
2197 dgrp_chan_count(nd, 0);
2198 break;
2199
2200 default:
2201 break;
2202 }
2203
2204 n = b - local_buf;
2205
2206 if (n != 0) {
2207 nd->nd_send_count++;
2208
2209 nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2210 nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2211 }
2212
2213 rtn = copy_to_user((void __user *)buf, local_buf, n);
2214 if (rtn) {
2215 rtn = -EFAULT;
2216 goto done;
2217 }
2218
2219 *ppos += n;
2220
2221 rtn = n;
2222
2223 if (nd->nd_mon_buf)
2224 dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2225
2226 /*
2227 * Release the NET lock.
2228 */
2229 done:
2230 up(&nd->nd_net_semaphore);
2231
2232 return rtn;
2233 }
2234
2235 /**
2236 * dgrp_receive() -- decode data packets received from the remote PortServer.
2237 * @nd: pointer to a node structure
2238 */
2239 static void dgrp_receive(struct nd_struct *nd)
2240 {
2241 struct ch_struct *ch;
2242 u8 *buf;
2243 u8 *b;
2244 u8 *dbuf;
2245 char *error;
2246 long port;
2247 long dlen;
2248 long plen;
2249 long remain;
2250 long n;
2251 long mlast;
2252 long elast;
2253 long mstat;
2254 long estat;
2255
2256 char ID[3];
2257
2258 nd->nd_tx_time = jiffies;
2259
2260 ID_TO_CHAR(nd->nd_ID, ID);
2261
2262 b = buf = nd->nd_iobuf;
2263 remain = nd->nd_remain;
2264
2265 /*
2266 * Loop to process Realport protocol packets.
2267 */
2268
2269 while (remain > 0) {
2270 int n0 = b[0] >> 4;
2271 int n1 = b[0] & 0x0f;
2272
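/*
 * The high nibble of the first byte selects the packet type; for
 * per-port packets the low nibble is the port number within the
 * currently selected receive module.
 */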
2273 if (n0 <= 12) {
2274 port = (nd->nd_rx_module << 4) + n1;
2275
2276 if (port >= nd->nd_chan_count) {
2277 error = "Improper Port Number";
2278 goto prot_error;
2279 }
2280
2281 ch = nd->nd_chan + port;
2282 } else {
2283 port = -1;
2284 ch = NULL;
2285 }
2286
2287 /*
2288 * Process by major packet type.
2289 */
2290
2291 switch (n0) {
2292
2293 /*
2294 * Process 1-byte header data packet.
2295 */
2296
2297 case 0:
2298 case 1:
2299 case 2:
2300 case 3:
2301 case 4:
2302 case 5:
2303 case 6:
2304 case 7:
2305 dlen = n0 + 1;
2306 plen = dlen + 1;
2307
2308 dbuf = b + 1;
2309 goto data;
2310
2311 /*
2312 * Process 2-byte header data packet.
2313 */
2314
2315 case 8:
2316 if (remain < 3)
2317 goto done;
2318
2319 dlen = b[1];
2320 plen = dlen + 2;
2321
2322 dbuf = b + 2;
2323 goto data;
2324
2325 /*
2326 * Process 3-byte header data packet.
2327 */
2328
2329 case 9:
2330 if (remain < 4)
2331 goto done;
2332
2333 dlen = get_unaligned_be16(b + 1);
2334 plen = dlen + 3;
2335
2336 dbuf = b + 3;
2337
2338 /*
2339 * Common packet handling code.
2340 */
2341
2342 data:
2343 nd->nd_tx_work = 1;
2344
2345 /*
2346 * Otherwise data should appear only when we are
2347 * in the CS_READY state.
2348 */
2349
2350 if (ch->ch_state < CS_READY) {
2351 error = "Data received before RWIN established";
2352 goto prot_error;
2353 }
2354
2355 /*
2356 * Assure that the data received is within the
2357 * allowable window.
2358 */
2359
2360 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2361
2362 if (dlen > n) {
2363 error = "Receive data overrun";
2364 goto prot_error;
2365 }
2366
2367 /*
2368 * If we received 3 or fewer characters,
2369 * assume it is a human typing, and set RTIME
2370 * to 10 milliseconds.
2371 *
2372 * If we receive more than 3 characters,
2373 * assume it is not a human typing, and set RTIME
2374 * to 100 milliseconds.
2375 */
2376
2377 if (ch->ch_edelay != DGRP_RTIME) {
2378 if (ch->ch_rtime != ch->ch_edelay) {
2379 ch->ch_rtime = ch->ch_edelay;
2380 ch->ch_flag |= CH_PARAM;
2381 }
2382 } else if (dlen <= 3) {
2383 if (ch->ch_rtime != 10) {
2384 ch->ch_rtime = 10;
2385 ch->ch_flag |= CH_PARAM;
2386 }
2387 } else {
2388 if (ch->ch_rtime != DGRP_RTIME) {
2389 ch->ch_rtime = DGRP_RTIME;
2390 ch->ch_flag |= CH_PARAM;
2391 }
2392 }
2393
2394 /*
2395 * If a portion of the packet is outside the
2396 * buffer, shorten the effective length of the
2397 * data packet to be the amount of data received.
2398 */
2399
2400 if (remain < plen)
2401 dlen -= plen - remain;
2402
2403 /*
2404 * Detect if receive flush is now complete.
2405 */
2406
2407 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2408 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2409 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2410 ch->ch_flag &= ~CH_RX_FLUSH;
2411 }
2412
2413 /*
2414 * If we are ready to receive, move the data into
2415 * the receive buffer.
2416 */
2417
2418 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2419
2420 if (ch->ch_state == CS_READY &&
2421 (ch->ch_tun.un_open_count != 0) &&
2422 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2423 (ch->ch_cflag & CF_CREAD) != 0 &&
2424 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2425 (ch->ch_send & RR_RX_FLUSH) == 0) {
2426
2427 if (ch->ch_rin + dlen >= RBUF_MAX) {
2428 n = RBUF_MAX - ch->ch_rin;
2429
2430 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2431
2432 ch->ch_rin = 0;
2433 dbuf += n;
2434 dlen -= n;
2435 }
2436
2437 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2438
2439 ch->ch_rin += dlen;
2440
2441
2442 /*
2443 * If we are not in fastcook mode, or
2444 * if there is a fastcook thread
2445 * waiting for data, send the data to
2446 * the line discipline.
2447 */
2448
2449 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2450 ch->ch_inwait != 0) {
2451 dgrp_input(ch);
2452 }
2453
2454 /*
2455 * If there is a read thread waiting
2456 * in select, and we are in fastcook
2457 * mode, wake him up.
2458 */
2459
2460 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2461 (ch->ch_flag & CH_FAST_READ) != 0)
2462 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2463
2464 /*
2465 * Wake any thread waiting in the
2466 * fastcook loop.
2467 */
2468
2469 if ((ch->ch_flag & CH_INPUT) != 0) {
2470 ch->ch_flag &= ~CH_INPUT;
2471
2472 wake_up_interruptible(&ch->ch_flag_wait);
2473 }
2474 }
2475
2476 /*
2477 * Fabricate and insert a data packet header to
2478 * precede the remaining data when it comes in.
2479 */
2480
2481 if (remain < plen) {
2482 dlen = plen - remain;
2483 b = buf;
2484
2485 b[0] = 0x90 + n1;
2486 put_unaligned_be16(dlen, b + 1);
2487
2488 remain = 3;
2489 goto done;
2490 }
2491 break;
2492
2493 /*
2494 * Handle Window Sequence packets.
2495 */
2496
2497 case 10:
2498 plen = 3;
2499 if (remain < plen)
2500 goto done;
2501
2502 nd->nd_tx_work = 1;
2503
2504 {
2505 ushort tpos = get_unaligned_be16(b + 1);
2506
2507 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2508 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2509 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
2510
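/*
* ack is how far the server's transmit position has advanced
* since we last recorded it, and unack is how much data we have
* sent that it has not yet consumed; an ack beyond unack can
* only mean the sequence numbers are out of step.
*/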
2511 if (ch->ch_state < CS_READY || ack > unack) {
2512 error = "Improper Window Sequence";
2513 goto prot_error;
2514 }
2515
2516 ch->ch_s_tpos = tpos;
2517
2518 if (notify <= ack)
2519 ch->ch_s_treq = tpos;
2520 }
2521 break;
2522
2523 /*
2524 * Handle Command response packets.
2525 */
2526
2527 case 11:
2528
2529 /*
2530 * RealPort engine fix - 03/11/2004
2531 *
2532 * This check was not here originally.
2533 *
2534 * We were using b[1] without verifying that the data
2535 * is actually there and valid. On a split packet, it
2536 * might not be yet.
2537 *
2538 * NOTE: I have never actually seen the failure happen
2539 * under Linux, but since I have seen it occur
2540 * under both Solaris and HP-UX, the assumption
2541 * is that it *could* happen here as well...
2542 */
2543 if (remain < 2)
2544 goto done;
2545
2546
2547 switch (b[1]) {
2548
2549 /*
2550 * Handle Open Response.
2551 */
2552
2553 case 11:
2554 plen = 6;
2555 if (remain < plen)
2556 goto done;
2557
2558 nd->nd_tx_work = 1;
2559
2560 {
2561 int req = b[2];
2562 int resp = b[3];
2563 port = get_unaligned_be16(b + 4);
2564
2565 if (port >= nd->nd_chan_count) {
2566 error = "Open channel number out of range";
2567 goto prot_error;
2568 }
2569
2570 ch = nd->nd_chan + port;
2571
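/*
* req is the open type that was requested and resp is the
* server's result: 0 means success, 1 and 2 indicate a busy
* port, and anything else is treated as a hard failure below.
*/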
2572 /*
2573 * How we handle an open response depends primarily
2574 * on our current channel state.
2575 */
2576
2577 switch (ch->ch_state) {
2578 case CS_IDLE:
2579
2580 /*
2581 * Handle a delayed open.
2582 */
2583
2584 if (ch->ch_otype_waiting != 0 &&
2585 req == ch->ch_otype_waiting &&
2586 resp == 0) {
2587 ch->ch_otype = req;
2588 ch->ch_otype_waiting = 0;
2589 ch->ch_state = CS_SEND_QUERY;
2590 break;
2591 }
2592 goto open_error;
2593
2594 case CS_WAIT_OPEN:
2595
2596 /*
2597 * Handle the open response.
2598 */
2599
2600 if (req == ch->ch_otype) {
2601 switch (resp) {
2602
2603 /*
2604 * On successful response, open the
2605 * port and proceed normally.
2606 */
2607
2608 case 0:
2609 ch->ch_state = CS_SEND_QUERY;
2610 break;
2611
2612 /*
2613 * On a busy response to a persistent open,
2614 * remember that the open is pending.
2615 */
2616
2617 case 1:
2618 case 2:
2619 if (req != OTYPE_IMMEDIATE) {
2620 ch->ch_otype_waiting = req;
2621 ch->ch_state = CS_IDLE;
2622 break;
2623 }
2624
2625 /*
2626 * Otherwise the server open failed. If
2627 * the Unix port is open, hang it up.
2628 */
2629
2630 default:
2631 if (ch->ch_open_count != 0) {
2632 ch->ch_flag |= CH_HANGUP;
2633 dgrp_carrier(ch);
2634 ch->ch_state = CS_IDLE;
2635 break;
2636 }
2637
2638 ch->ch_open_error = resp;
2639 ch->ch_state = CS_IDLE;
2640
2641 wake_up_interruptible(&ch->ch_flag_wait);
2642 }
2643 break;
2644 }
2645
2646 /*
2647 * Handle delayed response arrival preceding
2648 * the open response we are waiting for.
2649 */
2650
2651 if (ch->ch_otype_waiting != 0 &&
2652 req == ch->ch_otype_waiting &&
2653 resp == 0) {
2654 ch->ch_otype = ch->ch_otype_waiting;
2655 ch->ch_otype_waiting = 0;
2656 ch->ch_state = CS_WAIT_FAIL;
2657 break;
2658 }
2659 goto open_error;
2660
2661
2662 case CS_WAIT_FAIL:
2663
2664 /*
2665 * Handle response to immediate open arriving
2666 * after a delayed open success.
2667 */
2668
2669 if (req == OTYPE_IMMEDIATE) {
2670 ch->ch_state = CS_SEND_QUERY;
2671 break;
2672 }
2673 goto open_error;
2674
2675
2676 case CS_WAIT_CANCEL:
2677 /*
2678 * Handle delayed open response arriving before
2679 * the cancel response.
2680 */
2681
2682 if (req == ch->ch_otype_waiting &&
2683 resp == 0) {
2684 ch->ch_otype_waiting = 0;
2685 break;
2686 }
2687
2688 /*
2689 * Handle cancel response.
2690 */
2691
2692 if (req == 4 && resp == 0) {
2693 ch->ch_otype_waiting = 0;
2694 ch->ch_state = CS_IDLE;
2695 break;
2696 }
2697 goto open_error;
2698
2699
2700 case CS_WAIT_CLOSE:
2701 /*
2702 * Handle a successful response to a port
2703 * close.
2704 */
2705
2706 if (req >= 3) {
2707 ch->ch_state = CS_IDLE;
2708 break;
2709 }
2710 goto open_error;
2711
2712 open_error:
2713 default:
2714 {
2715 error = "Improper Open Response";
2716 goto prot_error;
2717 }
2718 }
2719 }
2720 break;
2721
2722 /*
2723 * Handle Synchronize Response.
2724 */
2725
2726 case 13:
2727 plen = 3;
2728 if (remain < plen)
2729 goto done;
2730 {
2731 int seq = b[2];
2732 int s;
2733
2734 /*
2735 * If channel was waiting for this sync response,
2736 * unset the flag, and wake up anyone waiting
2737 * on the event.
2738 */
2739 if (ch->ch_flag & CH_WAITING_SYNC) {
2740 ch->ch_flag &= ~(CH_WAITING_SYNC);
2741 wake_up_interruptible(&ch->ch_flag_wait);
2742 }
2743
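/*
* Ignore the response if the acknowledged sequence number is
* not among those currently outstanding.
*/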
2744 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2745 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2746 break;
2747 }
2748
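/*
* Walk every outstanding sequence slot up to and including the
* acknowledged one: wake any thread waiting on that slot and
* return its byte count to the unacknowledged-data total
* (nd_unack).
*/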
2749 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2750 if (nd->nd_seq_wait[s] != 0) {
2751 nd->nd_seq_wait[s] = 0;
2752
2753 wake_up_interruptible(&nd->nd_seq_wque[s]);
2754 }
2755
2756 nd->nd_unack -= nd->nd_seq_size[s];
2757
2758 if (s == seq)
2759 break;
2760 }
2761
2762 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2763 }
2764 break;
2765
2766 /*
2767 * Handle Sequence Response.
2768 */
2769
2770 case 15:
2771 plen = 6;
2772 if (remain < plen)
2773 goto done;
2774
2775 {
2776 /* Record that we have received the Sequence
2777 * Response, but we aren't interested in the
2778 * sequence numbers. We were using RIN like it
2779 * was ROUT and that was causing problems,
2780 * fixed 7-13-2001 David Fries. See comment in
2781 * drp.h for ch_s_rin variable.
2782 int rin = get_unaligned_be16(b + 2);
2783 int tpos = get_unaligned_be16(b + 4);
2784 */
2785
2786 ch->ch_send &= ~RR_SEQUENCE;
2787 ch->ch_expect &= ~RR_SEQUENCE;
2788 }
2789 goto check_query;
2790
2791 /*
2792 * Handle Status Response.
2793 */
2794
2795 case 17:
2796 plen = 5;
2797 if (remain < plen)
2798 goto done;
2799
2800 {
2801 ch->ch_s_elast = get_unaligned_be16(b + 2);
2802 ch->ch_s_mlast = b[4];
2803
2804 ch->ch_expect &= ~RR_STATUS;
2805 ch->ch_send &= ~RR_STATUS;
2806
2807 /*
2808 * CH_PHYS_CD is cleared because something _could_ be
2809 * waiting for the initial sense of carrier... and if
2810 * carrier is high immediately, we want to be sure to
2811 * wake them as soon as possible.
2812 */
2813 ch->ch_flag &= ~CH_PHYS_CD;
2814
2815 dgrp_carrier(ch);
2816 }
2817 goto check_query;
2818
2819 /*
2820 * Handle Line Error Response.
2821 */
2822
2823 case 19:
2824 plen = 14;
2825 if (remain < plen)
2826 goto done;
2827
2828 break;
2829
2830 /*
2831 * Handle Buffer Response.
2832 */
2833
2834 case 21:
2835 plen = 6;
2836 if (remain < plen)
2837 goto done;
2838
2839 {
2840 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2841 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2842
2843 ch->ch_send &= ~RR_BUFFER;
2844 ch->ch_expect &= ~RR_BUFFER;
2845 }
2846 goto check_query;
2847
2848 /*
2849 * Handle Port Capability Response.
2850 */
2851
2852 case 23:
2853 plen = 32;
2854 if (remain < plen)
2855 goto done;
2856
2857 {
2858 ch->ch_send &= ~RR_CAPABILITY;
2859 ch->ch_expect &= ~RR_CAPABILITY;
2860 }
2861
2862 /*
2863 * When all queries are complete, set those parameters
2864 * derived from the query results, then transition
2865 * to the READY state.
2866 */
2867
2868 check_query:
2869 if (ch->ch_state == CS_WAIT_QUERY &&
2870 (ch->ch_expect & (RR_SEQUENCE |
2871 RR_STATUS |
2872 RR_BUFFER |
2873 RR_CAPABILITY)) == 0) {
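/*
* ch_tmax and ch_rmax are set to a quarter of the server's
* transmit and receive buffer sizes, and the receive low/high
* watermarks to 1/4 and 3/4 of the server's receive buffer.
*/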
2874 ch->ch_tmax = ch->ch_s_tsize / 4;
2875
2876 if (ch->ch_edelay == DGRP_TTIME)
2877 ch->ch_ttime = DGRP_TTIME;
2878 else
2879 ch->ch_ttime = ch->ch_edelay;
2880
2881 ch->ch_rmax = ch->ch_s_rsize / 4;
2882
2883 if (ch->ch_edelay == DGRP_RTIME)
2884 ch->ch_rtime = DGRP_RTIME;
2885 else
2886 ch->ch_rtime = ch->ch_edelay;
2887
2888 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2889 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
2890
2891 ch->ch_state = CS_READY;
2892
2893 nd->nd_tx_work = 1;
2894 wake_up_interruptible(&ch->ch_flag_wait);
2895
2896 }
2897 break;
2898
2899 default:
2900 goto decode_error;
2901 }
2902 break;
2903
2904 /*
2905 * Handle Events.
2906 */
2907
2908 case 12:
2909 plen = 4;
2910 if (remain < plen)
2911 goto done;
2912
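/*
* Keep the previous modem and event status so that only changes
* (edges) are acted on below.
*/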
2913 mlast = ch->ch_s_mlast;
2914 elast = ch->ch_s_elast;
2915
2916 mstat = ch->ch_s_mlast = b[1];
2917 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2918
2919 /*
2920 * Handle modem changes.
2921 */
2922
2923 if (((mstat ^ mlast) & DM_CD) != 0)
2924 dgrp_carrier(ch);
2925
2926
2927 /*
2928 * Handle received break.
2929 */
2930
2931 if ((estat & ~elast & EV_RXB) != 0 &&
2932 (ch->ch_tun.un_open_count != 0) &&
2933 I_BRKINT(ch->ch_tun.un_tty) &&
2934 !(I_IGNBRK(ch->ch_tun.un_tty))) {
2935
2936 tty_buffer_request_room(&ch->port, 1);
2937 tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
2938 tty_flip_buffer_push(&ch->port);
2939
2940 }
2941
2942 /*
2943 * On transmit break complete, if more break traffic
2944 * is waiting then send it. Otherwise wake any threads
2945 * waiting for transmitter empty.
2946 */
2947
2948 if ((~estat & elast & EV_TXB) != 0 &&
2949 (ch->ch_expect & RR_TX_BREAK) != 0) {
2950
2951 nd->nd_tx_work = 1;
2952
2953 ch->ch_expect &= ~RR_TX_BREAK;
2954
2955 if (ch->ch_break_time != 0) {
2956 ch->ch_send |= RR_TX_BREAK;
2957 } else {
2958 ch->ch_send &= ~RR_TX_BREAK;
2959 ch->ch_flag &= ~CH_TX_BREAK;
2960 wake_up_interruptible(&ch->ch_flag_wait);
2961 }
2962 }
2963 break;
2964
2965 case 13:
2966 case 14:
2967 error = "Unrecognized command";
2968 goto prot_error;
2969
2970 /*
2971 * Decode Special Codes.
2972 */
2973
2974 case 15:
2975 switch (n1) {
2976 /*
2977 * One byte module select.
2978 */
2979
2980 case 0:
2981 case 1:
2982 case 2:
2983 case 3:
2984 case 4:
2985 case 5:
2986 case 6:
2987 case 7:
2988 plen = 1;
2989 nd->nd_rx_module = n1;
2990 break;
2991
2992 /*
2993 * Two byte module select.
2994 */
2995
2996 case 8:
2997 plen = 2;
2998 if (remain < plen)
2999 goto done;
3000
3001 nd->nd_rx_module = b[1];
3002 break;
3003
3004 /*
3005 * ID Request packet.
3006 */
3007
3008 case 11:
3009 if (remain < 4)
3010 goto done;
3011
3012 plen = get_unaligned_be16(b + 2);
3013
3014 if (plen < 12 || plen > 1000) {
3015 error = "Response Packet length error";
3016 goto prot_error;
3017 }
3018
3019 nd->nd_tx_work = 1;
3020
3021 switch (b[1]) {
3022 /*
3023 * Echo packet.
3024 */
3025
3026 case 0:
3027 nd->nd_send |= NR_ECHO;
3028 break;
3029
3030 /*
3031 * ID Response packet.
3032 */
3033
3034 case 1:
3035 nd->nd_send |= NR_IDENT;
3036 break;
3037
3038 /*
3039 * Password Response packet.
3040 */
3041
3042 case 32:
3043 nd->nd_send |= NR_PASSWORD;
3044 break;
3045
3046 }
3047 break;
3048
3049 /*
3050 * Various node-level response packets.
3051 */
3052
3053 case 12:
3054 if (remain < 4)
3055 goto done;
3056
3057 plen = get_unaligned_be16(b + 2);
3058
3059 if (plen < 4 || plen > 1000) {
3060 error = "Response Packet length error";
3061 goto prot_error;
3062 }
3063
3064 nd->nd_tx_work = 1;
3065
3066 switch (b[1]) {
3067 /*
3068 * Echo packet.
3069 */
3070
3071 case 0:
3072 nd->nd_expect &= ~NR_ECHO;
3073 break;
3074
3075 /*
3076 * Product Response Packet.
3077 */
3078
3079 case 1:
3080 {
3081 int desclen;
3082
3083 nd->nd_hw_ver = (b[8] << 8) | b[9];
3084 nd->nd_sw_ver = (b[10] << 8) | b[11];
3085 nd->nd_hw_id = b[6];
3086 desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
3087 plen - 12;
3088
3089 if (desclen <= 0) {
3090 error = "Response Packet desclen error";
3091 goto prot_error;
3092 }
3093
3094 strncpy(nd->nd_ps_desc, b + 12, desclen);
3095 nd->nd_ps_desc[desclen] = 0;
3096 }
3097
3098 nd->nd_expect &= ~NR_IDENT;
3099 break;
3100
3101 /*
3102 * Capability Response Packet.
3103 */
3104
3105 case 2:
3106 {
3107 int nn = get_unaligned_be16(b + 4);
3108
3109 if (nn > CHAN_MAX)
3110 nn = CHAN_MAX;
3111
3112 dgrp_chan_count(nd, nn);
3113 }
3114
3115 nd->nd_expect &= ~NR_CAPABILITY;
3116 break;
3117
3118 /*
3119 * VPD Response Packet.
3120 */
3121
3122 case 15:
3123 /*
3124 * NOTE: case 15 is here ONLY because the EtherLite
3125 * is broken, and sends a response to 24 back as 15.
3126 * The EtherLite firmware has since been fixed to send
3127 * back 24 correctly, but for backwards compatibility
3128 * we also reserve 15 for the broken EtherLite
3129 * response to 24.
3130 */
3131
3132 /* Fallthru! */
3133
3134 case 24:
3135
3136 /*
3137 * If the product doesn't support VPD,
3138 * it will send back a null IDRESP,
3139 * which is a length of 4 bytes.
3140 */
3141 if (plen > 4) {
3142 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3143 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3144 }
3145
3146 nd->nd_expect &= ~NR_VPD;
3147 break;
3148
3149 default:
3150 goto decode_error;
3151 }
3152
3153 if (nd->nd_expect == 0 &&
3154 nd->nd_state == NS_WAIT_QUERY) {
3155 nd->nd_state = NS_READY;
3156 }
3157 break;
3158
3159 /*
3160 * Debug packet.
3161 */
3162
3163 case 14:
3164 if (remain < 4)
3165 goto done;
3166
3167 plen = get_unaligned_be16(b + 2) + 4;
3168
3169 if (plen > 1000) {
3170 error = "Debug Packet too large";
3171 goto prot_error;
3172 }
3173
3174 if (remain < plen)
3175 goto done;
3176 break;
3177
3178 /*
3179 * Handle reset packet.
3180 */
3181
3182 case 15:
3183 if (remain < 2)
3184 goto done;
3185
3186 plen = 2 + b[1];
3187
3188 if (remain < plen)
3189 goto done;
3190
3191 nd->nd_tx_work = 1;
3192
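/*
* Temporarily NUL-terminate the reset reason text carried in the
* packet (apparently left over from a removed log message), then
* restore the saved byte.
*/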
3193 n = b[plen];
3194 b[plen] = 0;
3195
3196 b[plen] = n;
3197
3198 error = "Client Reset Acknowledge";
3199 goto prot_error;
3200
3201 default:
3202 goto decode_error;
3203 }
3204 break;
3205
3206 default:
3207 goto decode_error;
3208 }
3209
3210 b += plen;
3211 remain -= plen;
3212 }
3213
3214 /*
3215 * When the buffer is exhausted, copy any data left at the
3216 * top of the buffer back down to the bottom for the next
3217 * read request.
3218 */
3219
3220 done:
3221 if (remain > 0 && b != buf)
3222 memcpy(buf, b, remain);
3223
3224 nd->nd_remain = remain;
3225 return;
3226
3227 /*
3228 * Handle a decode error.
3229 */
3230
3231 decode_error:
3232 error = "Protocol decode error";
3233
3234 /*
3235 * Handle a general protocol error.
3236 */
3237
3238 prot_error:
3239 nd->nd_remain = 0;
3240 nd->nd_state = NS_SEND_ERROR;
3241 nd->nd_error = error;
3242 }
3243
3244 /*
3245 * dgrp_net_write() -- write data to the network device.
3246 *
3247 * A zero byte write indicates that the connection to the RealPort
3248 * device has been broken.
3249 *
3250 * A non-zero write indicates data from the RealPort device.
3251 */
3252 static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
3253 size_t count, loff_t *ppos)
3254 {
3255 struct nd_struct *nd;
3256 ssize_t rtn = 0;
3257 long n;
3258 long total = 0;
3259
3260 /*
3261 * Get the node pointer, and quit if it doesn't exist.
3262 */
3263 nd = (struct nd_struct *)(file->private_data);
3264 if (!nd)
3265 return -ENXIO;
3266
3267 /*
3268 * Grab the NET lock.
3269 */
3270 down(&nd->nd_net_semaphore);
3271
3272 nd->nd_write_count++;
3273
3274 /*
3275 * Handle disconnect.
3276 */
3277
3278 if (count == 0) {
3279 dgrp_net_idle(nd);
3280 /*
3281 * Set the active port count to zero.
3282 */
3283 dgrp_chan_count(nd, 0);
3284 goto unlock;
3285 }
3286
3287 /*
3288 * Loop to process entire receive packet.
3289 */
3290
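/*
* Each pass appends as much of the user buffer as fits behind any
* partial packet left from the previous write (nd_remain bytes),
* then lets dgrp_receive() consume the complete packets and carry
* any incomplete tail over again.
*/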
3291 while (count > 0) {
3292 n = UIO_MAX - nd->nd_remain;
3293
3294 if (n > count)
3295 n = count;
3296
3297 nd->nd_rx_byte += n + nd->nd_link.lk_header_size;
3298
3299 rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
3300 (void __user *) buf + total, n);
3301 if (rtn) {
3302 rtn = -EFAULT;
3303 goto unlock;
3304 }
3305
3306 *ppos += n;
3307
3308 total += n;
3309
3310 count -= n;
3311
3312 if (nd->nd_mon_buf)
3313 dgrp_monitor_data(nd, RPDUMP_SERVER,
3314 nd->nd_iobuf + nd->nd_remain, n);
3315
3316 nd->nd_remain += n;
3317
3318 dgrp_receive(nd);
3319 }
3320
3321 rtn = total;
3322
3323 unlock:
3324 /*
3325 * Release the NET lock.
3326 */
3327 up(&nd->nd_net_semaphore);
3328
3329 return rtn;
3330 }
3331
3332
3333 /*
3334 * dgrp_net_select()
3335 * Determine whether the device is ready to be read or written to;
3336 * the caller sleeps if it is not.
3337 */
3338 static unsigned int dgrp_net_select(struct file *file,
3339 struct poll_table_struct *table)
3340 {
3341 unsigned int retval = 0;
3342 struct nd_struct *nd = file->private_data;
3343
3344 poll_wait(file, &nd->nd_tx_waitq, table);
3345
3346 if (nd->nd_tx_ready)
3347 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3348
3349 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3350
3351 return retval;
3352 }
3353
3354 /*
3355 * dgrp_net_ioctl
3356 *
3357 * Implement those functions which allow the network daemon to control
3358 * the network parameters in the driver. The ioctls include ones to
3359 * get and set the link speed parameters for the PortServer.
3360 */
3361 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
3362 unsigned long arg)
3363 {
3364 struct nd_struct *nd;
3365 int rtn = 0;
3366 long size = _IOC_SIZE(cmd);
3367 struct link_struct link;
3368
3369 nd = file->private_data;
3370
3371 if (_IOC_DIR(cmd) & _IOC_READ)
3372 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3373 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3374 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3375
3376 if (!rtn)
3377 return rtn;
3378
3379 switch (cmd) {
3380 case DIGI_SETLINK:
3381 if (size != sizeof(struct link_struct))
3382 return -EINVAL;
3383
3384 if (copy_from_user(&link, (void __user *)arg, size))
3385 return -EFAULT;
3386
3387 if (link.lk_fast_rate < 9600)
3388 link.lk_fast_rate = 9600;
3389
3390 if (link.lk_slow_rate < 2400)
3391 link.lk_slow_rate = 2400;
3392
3393 if (link.lk_fast_rate > 10000000)
3394 link.lk_fast_rate = 10000000;
3395
3396 if (link.lk_slow_rate > link.lk_fast_rate)
3397 link.lk_slow_rate = link.lk_fast_rate;
3398
3399 if (link.lk_fast_delay > 2000)
3400 link.lk_fast_delay = 2000;
3401
3402 if (link.lk_slow_delay > 10000)
3403 link.lk_slow_delay = 10000;
3404
3405 if (link.lk_fast_delay < 60)
3406 link.lk_fast_delay = 60;
3407
3408 if (link.lk_slow_delay < link.lk_fast_delay)
3409 link.lk_slow_delay = link.lk_fast_delay;
3410
3411 if (link.lk_header_size < 2)
3412 link.lk_header_size = 2;
3413
3414 if (link.lk_header_size > 128)
3415 link.lk_header_size = 128;
3416
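/*
* Convert units for the poll handler, assuming rates arrive in
* bits per second and delays in milliseconds: rates become bytes
* per poll tick, delays become poll ticks.
*/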
3417 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3418 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3419
3420 link.lk_fast_delay /= dgrp_poll_tick;
3421 link.lk_slow_delay /= dgrp_poll_tick;
3422
3423 nd->nd_link = link;
3424
3425 break;
3426
3427 case DIGI_GETLINK:
3428 if (size != sizeof(struct link_struct))
3429 return -EINVAL;
3430
3431 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3432 size))
3433 return -EFAULT;
3434
3435 break;
3436
3437 default:
3438 return -EINVAL;
3439
3440 }
3441
3442 return 0;
3443 }
3444
3445 /**
3446 * dgrp_poll_handler() -- handler for poll timer
3447 *
3448 * As each timer expires, it determines (a) whether the "transmit"
3449 * waiter needs to be woken up, and (b) whether the poller needs to
3450 * be rescheduled.
3451 */
3452 void dgrp_poll_handler(unsigned long arg)
3453 {
3454 struct dgrp_poll_data *poll_data;
3455 struct nd_struct *nd;
3456 struct link_struct *lk;
3457 ulong time;
3458 ulong poll_time;
3459 ulong freq;
3460 ulong lock_flags;
3461
3462 poll_data = (struct dgrp_poll_data *) arg;
3463 freq = 1000 / poll_data->poll_tick;
3464 poll_data->poll_round += 17;
3465
3466 if (poll_data->poll_round >= freq)
3467 poll_data->poll_round -= freq;
3468
3469 /*
3470 * Loop to process all open nodes.
3471 *
3472 * For each node, determine the rate at which it should
3473 * be transmitting data. Then if the node should wake up
3474 * and transmit data now, enable the net receive select
3475 * to get the transmit going.
3476 */
3477
3478 list_for_each_entry(nd, &nd_struct_list, list) {
3479
3480 lk = &nd->nd_link;
3481
3482 /*
3483 * Decrement statistics. These are only for use with
3484 * KME, so it does not matter that the operations are done
3485 * unlocked and that the results are occasionally wrong.
3486 */
3487
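/*
* Each counter loses roughly 1/freq of its value per tick and the
* poll runs freq times per second, so the statistics decay toward
* zero with a time constant of about a second; poll_round only
* dithers the integer rounding.
*/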
3488 nd->nd_read_count -= (nd->nd_read_count +
3489 poll_data->poll_round) / freq;
3490 nd->nd_write_count -= (nd->nd_write_count +
3491 poll_data->poll_round) / freq;
3492 nd->nd_send_count -= (nd->nd_send_count +
3493 poll_data->poll_round) / freq;
3494 nd->nd_tx_byte -= (nd->nd_tx_byte +
3495 poll_data->poll_round) / freq;
3496 nd->nd_rx_byte -= (nd->nd_rx_byte +
3497 poll_data->poll_round) / freq;
3498
3499 /*
3500 * Wake the daemon to transmit data only when there is
3501 * enough byte credit to send data.
3502 *
3503 * The results are approximate because the operations
3504 * are performed unlocked, and we are inspecting
3505 * data asynchronously updated elsewhere. The whole
3506 * thing is just approximation anyway, so that should
3507 * be okay.
3508 */
3509
3510 if (lk->lk_slow_rate >= UIO_MAX) {
3511
3512 nd->nd_delay = 0;
3513 nd->nd_rate = UIO_MAX;
3514
3515 nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
3516 nd->nd_tx_credit = 3 * UIO_MAX;
3517
3518 } else {
3519
3520 long rate;
3521 long delay;
3522 long deposit;
3523 long charge;
3524 long size;
3525 long excess;
3526
3527 long seq_in = nd->nd_seq_in;
3528 long seq_out = nd->nd_seq_out;
3529
3530 /*
3531 * If there are no outstanding packets, run at the
3532 * fastest rate.
3533 */
3534
3535 if (seq_in == seq_out) {
3536 delay = 0;
3537 rate = lk->lk_fast_rate;
3538 }
3539
3540 /*
3541 * Otherwise compute the transmit rate based on the
3542 * delay since the oldest packet.
3543 */
3544
3545 else {
3546 /*
3547 * The actual delay is computed as the
3548 * time since the oldest unacknowledged
3549 * packet was sent, minus the time it
3550 * took to send that packet to the server.
3551 */
3552
3553 delay = ((jiffies - nd->nd_seq_time[seq_out])
3554 - (nd->nd_seq_size[seq_out] /
3555 lk->lk_fast_rate));
3556
3557 /*
3558 * If the delay is less than the "fast"
3559 * delay, transmit full speed. If greater
3560 * than the "slow" delay, transmit at the
3561 * "slow" speed. In between, interpolate
3562 * between the fast and slow speeds.
3563 */
3564
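/*
* Linear interpolation between the points
* (lk_fast_delay, lk_fast_rate) and (lk_slow_delay, lk_slow_rate).
*/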
3565 rate =
3566 (delay <= lk->lk_fast_delay ?
3567 lk->lk_fast_rate :
3568 delay >= lk->lk_slow_delay ?
3569 lk->lk_slow_rate :
3570 (lk->lk_slow_rate +
3571 (lk->lk_slow_delay - delay) *
3572 (lk->lk_fast_rate - lk->lk_slow_rate) /
3573 (lk->lk_slow_delay - lk->lk_fast_delay)
3574 )
3575 );
3576 }
3577
3578 nd->nd_delay = delay;
3579 nd->nd_rate = rate;
3580
3581 /*
3582 * Increase the transmit credit by depositing the
3583 * current transmit rate.
3584 */
3585
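/*
* This behaves like a token bucket: nd_tx_deposit accumulates
* credit at the computed rate, nd_tx_charge records what has
* actually been sent, and their difference is the credit
* available to spend.
*/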
3586 deposit = nd->nd_tx_deposit;
3587 charge = nd->nd_tx_charge;
3588
3589 deposit += rate;
3590
3591 /*
3592 * If the available transmit credit becomes too large,
3593 * reduce the deposit to correct the value.
3594 *
3595 * Too large is the max of:
3596 * 6 times the header size, or
3597 * 3 times the current transmit rate.
3598 */
3599
3600 size = 2 * nd->nd_link.lk_header_size;
3601
3602 if (size < rate)
3603 size = rate;
3604
3605 size *= 3;
3606
3607 excess = deposit - charge - size;
3608
3609 if (excess > 0)
3610 deposit -= excess;
3611
3612 nd->nd_tx_deposit = deposit;
3613 nd->nd_tx_credit = deposit - charge;
3614
3615 /*
3616 * Wake the transmit task only if the transmit credit
3617 * is at least 3 times the transmit header size.
3618 */
3619
3620 size = 3 * lk->lk_header_size;
3621
3622 if (nd->nd_tx_credit < size)
3623 continue;
3624 }
3625
3626
3627 /*
3628 * Enable the READ select to wake the daemon if there
3629 * is useful work for the drp_read routine to perform.
3630 */
3631
3632 if (waitqueue_active(&nd->nd_tx_waitq) &&
3633 (nd->nd_tx_work != 0 ||
3634 (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
3635 nd->nd_tx_ready = 1;
3636
3637 wake_up_interruptible(&nd->nd_tx_waitq);
3638
3639 /* not needed */
3640 /* nd->nd_flag &= ~ND_SELECT; */
3641 }
3642 }
3643
3644
3645 /*
3646 * Schedule ourselves to run again at the nominal wakeup interval.
3647 */
3648 spin_lock_irqsave(&poll_data->poll_lock, lock_flags);
3649
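/*
* node_active_count appears to include a reference for the
* pending timer itself: drop that reference, and only if other
* users remain re-take it and re-arm the timer, falling back to
* one tick from now if the nominal expiry time is out of range.
*/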
3650 poll_data->node_active_count--;
3651 if (poll_data->node_active_count > 0) {
3652 poll_data->node_active_count++;
3653 poll_time = poll_data->timer.expires +
3654 poll_data->poll_tick * HZ / 1000;
3655
3656 time = poll_time - jiffies;
3657
3658 if (time >= 2 * poll_data->poll_tick)
3659 poll_time = jiffies + dgrp_poll_tick * HZ / 1000;
3660
3661 poll_data->timer.expires = poll_time;
3662 add_timer(&poll_data->timer);
3663 }
3664
3665 spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);
3666 }