Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / dgrp / dgrp_net_ops.c
1 /*
2 *
3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
15 *
16 */
17
18 /*
19 *
20 * Filename:
21 *
22 * dgrp_net_ops.c
23 *
24 * Description:
25 *
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
28 * in "/proc".
29 *
30 * Author:
31 *
32 * James A. Puzzo
33 *
34 */
35
36 #include <linux/module.h>
37 #include <linux/proc_fs.h>
38 #include <linux/types.h>
39 #include <linux/string.h>
40 #include <linux/device.h>
41 #include <linux/tty.h>
42 #include <linux/tty_flip.h>
43 #include <linux/spinlock.h>
44 #include <linux/poll.h>
45 #include <linux/sched.h>
46 #include <linux/ratelimit.h>
47 #include <asm/unaligned.h>
48
49 #define MYFLIPLEN TBUF_MAX
50
51 #include "dgrp_common.h"
52
53 #define TTY_FLIPBUF_SIZE 512
54 #define DEVICE_NAME_SIZE 50
55
56 /*
57 * Generic helper function declarations
58 */
59 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
60 unsigned char *fbuf, int *len);
61
62 /*
63 * File operation declarations
64 */
65 static int dgrp_net_open(struct inode *, struct file *);
66 static int dgrp_net_release(struct inode *, struct file *);
67 static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
68 static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
69 loff_t *);
70 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
71 unsigned long arg);
72 static unsigned int dgrp_net_select(struct file *file,
73 struct poll_table_struct *table);
74
/*
 * File operations for the "net" proc device.  This is the data path
 * between the driver and the RealPort daemon: read/write move protocol
 * traffic, poll reports readiness, and open/release bracket a single
 * network session (see dgrp_net_open/dgrp_net_release below).
 */
static const struct file_operations net_ops = {
	.owner = THIS_MODULE,
	.read = dgrp_net_read,
	.write = dgrp_net_write,
	.poll = dgrp_net_select,
	.unlocked_ioctl = dgrp_net_ioctl,
	.open = dgrp_net_open,
	.release = dgrp_net_release,
};
84
85 static struct inode_operations net_inode_ops = {
86 .permission = dgrp_inode_permission
87 };
88
89 void dgrp_register_net_hook(struct proc_dir_entry *de)
90 {
91 struct nd_struct *node = de->data;
92
93 de->proc_iops = &net_inode_ops;
94 de->proc_fops = &net_ops;
95 node->nd_net_de = de;
96 sema_init(&node->nd_net_semaphore, 1);
97 node->nd_state = NS_CLOSED;
98 dgrp_create_node_class_sysfs_files(node);
99 }
100
101
102 /**
103 * dgrp_dump() -- prints memory for debugging purposes.
104 * @mem: Memory location which should be printed to the console
105 * @len: Number of bytes to be dumped
106 */
107 static void dgrp_dump(u8 *mem, int len)
108 {
109 int i;
110
111 pr_debug("dgrp dump length = %d, data = ", len);
112 for (i = 0; i < len; ++i)
113 pr_debug("%.2x ", mem[i]);
114 pr_debug("\n");
115 }
116
117 /**
118 * dgrp_read_data_block() -- Read a data block
119 * @ch: struct ch_struct *
120 * @flipbuf: u8 *
121 * @flipbuf_size: size of flipbuf
122 */
123 static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
124 int flipbuf_size)
125 {
126 int t;
127 int n;
128
129 if (flipbuf_size <= 0)
130 return;
131
132 t = RBUF_MAX - ch->ch_rout;
133 n = flipbuf_size;
134
135 if (n >= t) {
136 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t);
137 flipbuf += t;
138 n -= t;
139 ch->ch_rout = 0;
140 }
141
142 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
143 flipbuf += n;
144 ch->ch_rout += n;
145 }
146
147
/**
 * dgrp_input() -- send data to the line disipline
 * @ch: pointer to channel struct
 *
 * Copys the rbuf to the flipbuf and sends to line discipline.
 * Sends input buffer data to the line discipline.
 *
 * If the port is closed, closing, or has no usable tty, the queued
 * receive data is discarded (ch_rout snapped to ch_rin) instead.
 */
static void dgrp_input(struct ch_struct *ch)
{
	struct nd_struct *nd;
	struct tty_struct *tty;
	int data_len;
	int len;
	int tty_count;
	ulong lock_flags;
	u8 *myflipbuf;
	u8 *myflipflagbuf;

	if (!ch)
		return;

	nd = ch->ch_nd;

	if (!nd)
		return;

	/* nd_lock guards the checks below and the shared flip buffers. */
	spin_lock_irqsave(&nd->nd_lock, lock_flags);

	/* Node-wide staging buffers shared by all channels of this node. */
	myflipbuf = nd->nd_inputbuf;
	myflipflagbuf = nd->nd_inputflagbuf;

	/* No reader: drop the queued data. */
	if (!ch->ch_open_count) {
		ch->ch_rout = ch->ch_rin;
		goto out;
	}

	/* Port is on its way down: drop the queued data. */
	if (ch->ch_tun.un_flag & UN_CLOSING) {
		ch->ch_rout = ch->ch_rin;
		goto out;
	}

	tty = (ch->ch_tun).un_tty;


	/* Sanity-check the tty before handing it data. */
	if (!tty || tty->magic != TTY_MAGIC) {
		ch->ch_rout = ch->ch_rin;
		goto out;
	}

	tty_count = tty->count;
	if (!tty_count) {
		ch->ch_rout = ch->ch_rin;
		goto out;
	}

	if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
		ch->ch_rout = ch->ch_rin;
		goto out;
	}

	/*
	 * Drop the lock for the actual transfer; the tty flip-buffer
	 * calls below may not be made under nd_lock.
	 * NOTE(review): the checks above are therefore only advisory
	 * once the lock is released -- confirm against the poller.
	 */
	spin_unlock_irqrestore(&nd->nd_lock, lock_flags);

	/* data_len should be the number of chars that we read in */
	data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;

	/* len is the amount of data we are going to transfer here */
	len = tty_buffer_request_room(&ch->port, data_len);

	/* Check DPA flow control */
	if ((nd->nd_dpa_debug) &&
	    (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
	    (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
		len = 0;

	if ((len) && !(ch->ch_flag & CH_RXSTOP)) {

		dgrp_read_data_block(ch, myflipbuf, len);

		/*
		 * Scan for parity/break markers only when the termios
		 * input flags require it; otherwise flag all bytes normal.
		 */
		if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
			parity_scan(ch, myflipbuf, myflipflagbuf, &len);
		else
			memset(myflipflagbuf, TTY_NORMAL, len);

		/* Mirror the data to the DPA monitor when it is watching. */
		if ((nd->nd_dpa_debug) &&
		    (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
			dgrp_dpa_data(nd, 1, myflipbuf, len);

		tty_insert_flip_string_flags(&ch->port, myflipbuf,
					     myflipflagbuf, len);
		tty_flip_buffer_push(&ch->port);

		ch->ch_rxcount += len;
	}

	/*
	 * Wake up any sleepers (maybe dgrp close) that might be waiting
	 * for a channel flag state change.
	 */
	wake_up_interruptible(&ch->ch_flag_wait);
	return;

out:
	spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
}
253
254
/*
 * parity_scan
 *
 * Loop to inspect each single character or 0xFF escape.
 *
 * if PARMRK & ~DOSMODE:
 * 0xFF 0xFF           Normal 0xFF character, escaped
 *                     to eliminate confusion.
 * 0xFF 0x00 0x00      Break
 * 0xFF 0x00 CC        Error character CC.
 * CC                  Normal character CC.
 *
 * if PARMRK & DOSMODE:
 * 0xFF 0x18 0x00      Break
 * 0xFF 0x08 0x00      Framing Error
 * 0xFF 0x04 0x00      Parity error
 * 0xFF 0x0C 0x00      Both Framing and Parity error
 *
 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
 * as per protocol
 *
 * The scan is a three-state machine persisted across calls in
 * ch->ch_pscan_state (0 = no FF seen, 1 = FF seen, 2 = second escape
 * byte consumed, saved in ch->ch_pscan_savechar).  On return *len is
 * rewritten to the number of output characters produced, which may be
 * smaller than the input length because escapes are consumed.
 */
static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
			unsigned char *fbuf, int *len)
{
	int l = *len;
	int count = 0;
	int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
	unsigned char *cout; /* character buffer */
	unsigned char *fout; /* flag buffer */
	unsigned char *in;
	unsigned char c;

	in = cbuf;
	cout = cbuf;	/* output is written in place over the input */
	fout = fbuf;

	while (l--) {
		c = *in;
		in++;

		switch (ch->ch_pscan_state) {
		default:
			/* reset to sanity and fall through */
			ch->ch_pscan_state = 0 ;

		case 0:
			/* No FF seen yet */
			if (c == 0xff) /* delete this character from stream */
				ch->ch_pscan_state = 1;
			else {
				*cout++ = c;
				*fout++ = TTY_NORMAL;
				count += 1;
			}
			break;

		case 1:
			/* first FF seen */
			if (c == 0xff) {
				/* doubled ff, transform to single ff */
				*cout++ = c;
				*fout++ = TTY_NORMAL;
				count += 1;
				ch->ch_pscan_state = 0;
			} else {
				/* save value examination in next state */
				ch->ch_pscan_savechar = c;
				ch->ch_pscan_state = 2;
			}
			break;

		case 2:
			/* third character of ff sequence */
			*cout++ = c;
			if (DOS) {
				/*
				 * NOTE(review): 0x0C (frame+parity) is
				 * reported as TTY_FRAME here because the
				 * 0x08 test wins -- confirm intended.
				 */
				if (ch->ch_pscan_savechar & 0x10)
					*fout++ = TTY_BREAK;
				else if (ch->ch_pscan_savechar & 0x08)
					*fout++ = TTY_FRAME;
				else
					/*
					 * either marked as a parity error,
					 * indeterminate, or not in DOSMODE
					 * call it a parity error
					 */
					*fout++ = TTY_PARITY;
			} else {
				/* case FF XX ?? where XX is not 00 */
				if (ch->ch_pscan_savechar & 0xff) {
					/* this should not happen */
					pr_info("%s: parity_scan: error unexpected byte\n",
						__func__);
					*fout++ = TTY_PARITY;
				}
				/* case FF 00 XX where XX is not 00 */
				else if (c == 0xff)
					*fout++ = TTY_PARITY;
				/* case FF 00 00 */
				else
					*fout++ = TTY_BREAK;

			}
			count += 1;
			ch->ch_pscan_state = 0;
		}
	}
	*len = count;
}
363
364
365 /**
366 * dgrp_net_idle() -- Idle the network connection
367 * @nd: pointer to node structure to idle
368 */
369 static void dgrp_net_idle(struct nd_struct *nd)
370 {
371 struct ch_struct *ch;
372 int i;
373
374 nd->nd_tx_work = 1;
375
376 nd->nd_state = NS_IDLE;
377 nd->nd_flag = 0;
378
379 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
380 if (!nd->nd_seq_wait[i]) {
381 nd->nd_seq_wait[i] = 0;
382 wake_up_interruptible(&nd->nd_seq_wque[i]);
383 }
384
385 if (i == nd->nd_seq_in)
386 break;
387 }
388
389 nd->nd_seq_out = nd->nd_seq_in;
390
391 nd->nd_unack = 0;
392 nd->nd_remain = 0;
393
394 nd->nd_tx_module = 0x10;
395 nd->nd_rx_module = 0x00;
396
397 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
398 ch->ch_state = CS_IDLE;
399
400 ch->ch_otype = 0;
401 ch->ch_otype_waiting = 0;
402 }
403 }
404
405 /*
406 * Increase the number of channels, waking up any
407 * threads that might be waiting for the channels
408 * to appear.
409 */
410 static void increase_channel_count(struct nd_struct *nd, int n)
411 {
412 struct ch_struct *ch;
413 struct device *classp;
414 char name[DEVICE_NAME_SIZE];
415 int ret;
416 u8 *buf;
417 int i;
418
419 for (i = nd->nd_chan_count; i < n; ++i) {
420 ch = nd->nd_chan + i;
421
422 /* FIXME: return a useful error instead! */
423 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
424 if (!buf)
425 return;
426
427 if (ch->ch_tbuf)
428 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
429 __func__);
430
431 ch->ch_tbuf = buf;
432
433 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
434 if (!buf)
435 return;
436
437 if (ch->ch_rbuf)
438 pr_info("%s - ch_rbuf was not NULL\n",
439 __func__);
440 ch->ch_rbuf = buf;
441
442 classp = tty_port_register_device(&ch->port,
443 nd->nd_serial_ttdriver, i,
444 NULL);
445
446 ch->ch_tun.un_sysfs = classp;
447 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
448
449 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
450 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
451 &classp->kobj, name);
452
453 /* NOTE: We don't support "cu" devices anymore,
454 * so you will notice we don't register them
455 * here anymore. */
456 if (dgrp_register_prdevices) {
457 classp = tty_register_device(nd->nd_xprint_ttdriver,
458 i, NULL);
459 ch->ch_pun.un_sysfs = classp;
460 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
461
462 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
463 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
464 &classp->kobj, name);
465 }
466
467 nd->nd_chan_count = i + 1;
468 wake_up_interruptible(&ch->ch_flag_wait);
469 }
470 }
471
472 /*
473 * Decrease the number of channels, and wake up any threads that might
474 * be waiting on the channels that vanished.
475 */
476 static void decrease_channel_count(struct nd_struct *nd, int n)
477 {
478 struct ch_struct *ch;
479 char name[DEVICE_NAME_SIZE];
480 int i;
481
482 for (i = nd->nd_chan_count - 1; i >= n; --i) {
483 ch = nd->nd_chan + i;
484
485 /*
486 * Make any open ports inoperative.
487 */
488 ch->ch_state = CS_IDLE;
489
490 ch->ch_otype = 0;
491 ch->ch_otype_waiting = 0;
492
493 /*
494 * Only "HANGUP" if we care about carrier
495 * transitions and we are already open.
496 */
497 if (ch->ch_open_count != 0) {
498 ch->ch_flag |= CH_HANGUP;
499 dgrp_carrier(ch);
500 }
501
502 /*
503 * Unlike the CH_HANGUP flag above, use another
504 * flag to indicate to the RealPort state machine
505 * that this port has disappeared.
506 */
507 if (ch->ch_open_count != 0)
508 ch->ch_flag |= CH_PORT_GONE;
509
510 wake_up_interruptible(&ch->ch_flag_wait);
511
512 nd->nd_chan_count = i;
513
514 kfree(ch->ch_tbuf);
515 ch->ch_tbuf = NULL;
516
517 kfree(ch->ch_rbuf);
518 ch->ch_rbuf = NULL;
519
520 nd->nd_chan_count = i;
521
522 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
523 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
524 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
525 tty_unregister_device(nd->nd_serial_ttdriver, i);
526
527 /*
528 * NOTE: We don't support "cu" devices anymore, so don't
529 * unregister them here anymore.
530 */
531
532 if (dgrp_register_prdevices) {
533 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
534 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
535 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
536 tty_unregister_device(nd->nd_xprint_ttdriver, i);
537 }
538 }
539 }
540
541 /**
542 * dgrp_chan_count() -- Adjust the node channel count.
543 * @nd: pointer to a node structure
544 * @n: new value for channel count
545 *
546 * Adjusts the node channel count. If new ports have appeared, it tries
547 * to signal those processes that might have been waiting for ports to
548 * appear. If ports have disappeared it tries to signal those processes
549 * that might be hung waiting for a response for the now non-existant port.
550 */
551 static void dgrp_chan_count(struct nd_struct *nd, int n)
552 {
553 if (n == nd->nd_chan_count)
554 return;
555
556 if (n > nd->nd_chan_count)
557 increase_channel_count(nd, n);
558
559 if (n < nd->nd_chan_count)
560 decrease_channel_count(nd, n);
561 }
562
/**
 * dgrp_monitor() -- send data to the device monitor queue
 * @nd: pointer to a node structure
 * @buf: data to copy to the monitoring buffer
 * @len: number of bytes to transfer to the buffer
 *
 * Called by the net device routines to send data to the device
 * monitor queue.  If the device monitor buffer is too full to
 * accept the data, it waits until the buffer is ready.
 *
 * Runs under nd_mon_semaphore, which is dropped (and re-taken)
 * whenever the writer has to sleep for space.
 */
static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
{
	int n;
	int r;
	int rtn;

	/*
	 * Grab monitor lock.
	 */
	down(&nd->nd_mon_semaphore);

	/*
	 * Loop while data remains.  If nd_mon_buf disappears (monitor
	 * closed), the remaining data is silently dropped.
	 */
	while ((len > 0) && (nd->nd_mon_buf)) {
		/*
		 * Determine the amount of available space left in the
		 * buffer. If there's none, wait until some appears.
		 */

		n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;

		if (!n) {
			nd->nd_mon_flag |= MON_WAIT_SPACE;

			up(&nd->nd_mon_semaphore);

			/*
			 * Go to sleep waiting until the condition becomes true.
			 * The reader clears MON_WAIT_SPACE once it has
			 * drained some data.
			 */
			rtn = wait_event_interruptible(nd->nd_mon_wqueue,
				((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));

			/* FIXME: really ignore rtn? */

			/*
			 * We can't exit here if we receive a signal, since
			 * to do so would trash the debug stream.
			 */

			down(&nd->nd_mon_semaphore);

			continue;
		}

		/*
		 * Copy as much data as will fit.
		 */

		if (n > len)
			n = len;

		/* Bytes until the physical end of the ring buffer. */
		r = MON_MAX - nd->nd_mon_in;

		if (r <= n) {
			/* Fill to the end of the ring, then wrap to 0. */
			memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);

			n -= r;

			nd->nd_mon_in = 0;

			buf += r;
			len -= r;
		}

		memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);

		nd->nd_mon_in += n;

		buf += n;
		len -= n;

		/* Sanity check: the in-index must stay inside the ring. */
		if (nd->nd_mon_in >= MON_MAX)
			pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
					    __func__, nd->nd_mon_in);

		/*
		 * Wakeup any thread waiting for data
		 */

		if (nd->nd_mon_flag & MON_WAIT_DATA) {
			nd->nd_mon_flag &= ~MON_WAIT_DATA;
			wake_up_interruptible(&nd->nd_mon_wqueue);
		}
	}

	/*
	 * Release the monitor lock.
	 */
	up(&nd->nd_mon_semaphore);
}
664
665 /**
666 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
667 * @nd: pointer to a node structure
668 * @buf: destination buffer
669 *
670 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
671 * open.
672 */
673 static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
674 {
675 ulong t;
676
677 /*
678 * Convert time in HZ since open to time in milliseconds
679 * since open.
680 */
681 t = jiffies - nd->nd_mon_lbolt;
682 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
683
684 put_unaligned_be32((uint)(t & 0xffffffff), buf);
685 }
686
687
688
689 /**
690 * dgrp_monitor_message() -- Builds a rpdump style message.
691 * @nd: pointer to a node structure
692 * @message: destination buffer
693 */
694 static void dgrp_monitor_message(struct nd_struct *nd, char *message)
695 {
696 u8 header[7];
697 int n;
698
699 header[0] = RPDUMP_MESSAGE;
700
701 dgrp_encode_time(nd, header + 1);
702
703 n = strlen(message);
704
705 put_unaligned_be16(n, header + 5);
706
707 dgrp_monitor(nd, header, sizeof(header));
708 dgrp_monitor(nd, (u8 *) message, n);
709 }
710
711
712
713 /**
714 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
715 * @nd: pointer to a node structure
716 */
717 static void dgrp_monitor_reset(struct nd_struct *nd)
718 {
719 u8 header[5];
720
721 header[0] = RPDUMP_RESET;
722
723 dgrp_encode_time(nd, header + 1);
724
725 dgrp_monitor(nd, header, sizeof(header));
726 }
727
728 /**
729 * dgrp_monitor_data() -- builds a monitor data packet
730 * @nd: pointer to a node structure
731 * @type: type of message to be logged
732 * @buf: data to be logged
733 * @size: number of bytes in the buffer
734 */
735 static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
736 {
737 u8 header[7];
738
739 header[0] = type;
740
741 dgrp_encode_time(nd, header + 1);
742
743 put_unaligned_be16(size, header + 5);
744
745 dgrp_monitor(nd, header, sizeof(header));
746 dgrp_monitor(nd, buf, size);
747 }
748
749 static int alloc_nd_buffers(struct nd_struct *nd)
750 {
751
752 nd->nd_iobuf = NULL;
753 nd->nd_writebuf = NULL;
754 nd->nd_inputbuf = NULL;
755 nd->nd_inputflagbuf = NULL;
756
757 /*
758 * Allocate the network read/write buffer.
759 */
760 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
761 if (!nd->nd_iobuf)
762 goto out_err;
763
764 /*
765 * Allocate a buffer for doing the copy from user space to
766 * kernel space in the write routines.
767 */
768 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
769 if (!nd->nd_writebuf)
770 goto out_err;
771
772 /*
773 * Allocate a buffer for doing the copy from kernel space to
774 * tty buffer space in the read routines.
775 */
776 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
777 if (!nd->nd_inputbuf)
778 goto out_err;
779
780 /*
781 * Allocate a buffer for doing the copy from kernel space to
782 * tty buffer space in the read routines.
783 */
784 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
785 if (!nd->nd_inputflagbuf)
786 goto out_err;
787
788 return 0;
789
790 out_err:
791 kfree(nd->nd_iobuf);
792 kfree(nd->nd_writebuf);
793 kfree(nd->nd_inputbuf);
794 kfree(nd->nd_inputflagbuf);
795 return -ENOMEM;
796 }
797
/*
 * dgrp_net_open() -- Open the NET device for a particular PortServer
 *
 * Requires CAP_SYS_ADMIN.  Only one opener is allowed per node at a
 * time (nd_state must be NS_CLOSED, else -EBUSY).  On success the
 * node is moved to the IDLE state, its work buffers are allocated,
 * and the global poll timer is started if it was not already running.
 */
static int dgrp_net_open(struct inode *inode, struct file *file)
{
	struct nd_struct *nd;
	struct proc_dir_entry *de;
	ulong lock_flags;
	int rtn;

	/* Pin the module for the lifetime of this open. */
	rtn = try_module_get(THIS_MODULE);
	if (!rtn)
		return -EAGAIN;

	if (!capable(CAP_SYS_ADMIN)) {
		rtn = -EPERM;
		goto done;
	}

	/*
	 * Make sure that the "private_data" field hasn't already been used.
	 */
	if (file->private_data) {
		rtn = -EINVAL;
		goto done;
	}

	/*
	 * Get the node pointer, and fail if it doesn't exist.
	 */
	de = PDE(inode);
	if (!de) {
		rtn = -ENXIO;
		goto done;
	}

	nd = (struct nd_struct *) de->data;
	if (!nd) {
		rtn = -ENXIO;
		goto done;
	}

	file->private_data = (void *) nd;

	/*
	 * Grab the NET lock.
	 */
	down(&nd->nd_net_semaphore);

	/* Only one network session per node. */
	if (nd->nd_state != NS_CLOSED) {
		rtn = -EBUSY;
		goto unlock;
	}

	/*
	 * Initialize the link speed parameters.
	 */

	nd->nd_link.lk_fast_rate = UIO_MAX;
	nd->nd_link.lk_slow_rate = UIO_MAX;

	nd->nd_link.lk_fast_delay = 1000;
	nd->nd_link.lk_slow_delay = 1000;

	nd->nd_link.lk_header_size = 46;


	rtn = alloc_nd_buffers(nd);
	if (rtn)
		goto unlock;

	/*
	 * The port is now open, so move it to the IDLE state
	 */
	dgrp_net_idle(nd);

	nd->nd_tx_time = jiffies;

	/*
	 * If the polling routing is not running, start it running here
	 */
	spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);

	/* node_active_count == 2 marks "timer running" (see release). */
	if (!dgrp_poll_data.node_active_count) {
		dgrp_poll_data.node_active_count = 2;
		dgrp_poll_data.timer.expires = jiffies +
			dgrp_poll_tick * HZ / 1000;
		add_timer(&dgrp_poll_data.timer);
	}

	spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);

	dgrp_monitor_message(nd, "Net Open");

unlock:
	/*
	 * Release the NET lock.
	 */
	up(&nd->nd_net_semaphore);

done:
	/* Drop the module reference on any failure path. */
	if (rtn)
		module_put(THIS_MODULE);

	return rtn;
}
904
/*
 * dgrp_net_release() -- close the NET device for a particular PortServer
 *
 * Idles the node, frees the work buffers, drops the channel count to
 * zero, and stops the global poll timer if this was the last active
 * node.  Always releases the module reference taken at open.
 */
static int dgrp_net_release(struct inode *inode, struct file *file)
{
	struct nd_struct *nd;
	ulong lock_flags;

	nd = (struct nd_struct *)(file->private_data);
	if (!nd)
		goto done;

	/* TODO : historical locking placeholder */
	/*
	 * In the HPUX version of the RealPort driver (which served as a basis
	 * for this driver) this locking code was used. Saved if ever we need
	 * to review the locking under Linux.
	 */
	/* spinlock(&nd->nd_lock); */


	/*
	 * Grab the NET lock.
	 */
	down(&nd->nd_net_semaphore);

	/*
	 * Before "closing" the internal connection, make sure all
	 * ports are "idle".
	 */
	dgrp_net_idle(nd);

	nd->nd_state = NS_CLOSED;
	nd->nd_flag = 0;

	/*
	 * TODO ... must the wait queue be reset on close?
	 * should any pending waiters be reset?
	 * Let's decide to assert that the waitq is empty... and see
	 * how soon we break.
	 */
	if (waitqueue_active(&nd->nd_tx_waitq))
		pr_info("%s - expected waitqueue_active to be false\n",
			__func__);

	nd->nd_send = 0;

	/* Free the buffers allocated by alloc_nd_buffers() at open. */
	kfree(nd->nd_iobuf);
	nd->nd_iobuf = NULL;

	/* TODO : historical locking placeholder */
	/*
	 * In the HPUX version of the RealPort driver (which served as a basis
	 * for this driver) this locking code was used. Saved if ever we need
	 * to review the locking under Linux.
	 */
	/* spinunlock( &nd->nd_lock ); */


	kfree(nd->nd_writebuf);
	nd->nd_writebuf = NULL;

	kfree(nd->nd_inputbuf);
	nd->nd_inputbuf = NULL;

	kfree(nd->nd_inputflagbuf);
	nd->nd_inputflagbuf = NULL;

	/* TODO : historical locking placeholder */
	/*
	 * In the HPUX version of the RealPort driver (which served as a basis
	 * for this driver) this locking code was used. Saved if ever we need
	 * to review the locking under Linux.
	 */
	/* spinlock(&nd->nd_lock); */

	/*
	 * Set the active port count to zero.
	 */
	dgrp_chan_count(nd, 0);

	/* TODO : historical locking placeholder */
	/*
	 * In the HPUX version of the RealPort driver (which served as a basis
	 * for this driver) this locking code was used. Saved if ever we need
	 * to review the locking under Linux.
	 */
	/* spinunlock(&nd->nd_lock); */

	/*
	 * Release the NET lock.
	 */
	up(&nd->nd_net_semaphore);

	/*
	 * Cause the poller to stop scheduling itself if this is
	 * the last active node.
	 */
	spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);

	/* 2 is the "timer running" sentinel set by dgrp_net_open(). */
	if (dgrp_poll_data.node_active_count == 2) {
		del_timer(&dgrp_poll_data.timer);
		dgrp_poll_data.node_active_count = 0;
	}

	spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);

	/* Re-take the NET lock briefly just to log the close event. */
	down(&nd->nd_net_semaphore);

	dgrp_monitor_message(nd, "Net Close");

	up(&nd->nd_net_semaphore);

done:
	module_put(THIS_MODULE);
	file->private_data = NULL;
	return 0;
}
1021
1022 /* used in dgrp_send to setup command header */
1023 static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1024 {
1025 *b++ = 0xb0 + (port & 0x0f);
1026 *b++ = cmd;
1027 return b;
1028 }
1029
1030 /**
1031 * dgrp_send() -- build a packet for transmission to the server
1032 * @nd: pointer to a node structure
1033 * @tmax: maximum bytes to transmit
1034 *
1035 * returns number of bytes sent
1036 */
1037 static int dgrp_send(struct nd_struct *nd, long tmax)
1038 {
1039 struct ch_struct *ch = nd->nd_chan;
1040 u8 *b;
1041 u8 *buf;
1042 u8 *mbuf;
1043 u8 port;
1044 int mod;
1045 long send;
1046 int maxport;
1047 long lastport = -1;
1048 ushort rwin;
1049 long in;
1050 ushort n;
1051 long t;
1052 long ttotal;
1053 long tchan;
1054 long tsend;
1055 ushort tsafe;
1056 long work;
1057 long send_sync;
1058 long wanted_sync_port = -1;
1059 ushort tdata[CHAN_MAX];
1060 long used_buffer;
1061
1062 mbuf = nd->nd_iobuf + UIO_BASE;
1063 buf = b = mbuf;
1064
1065 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1066
1067 ttotal = 0;
1068 tchan = 0;
1069
1070 memset(tdata, 0, sizeof(tdata));
1071
1072
1073 /*
1074 * If there are any outstanding requests to be serviced,
1075 * service them here.
1076 */
1077 if (nd->nd_send & NR_PASSWORD) {
1078
1079 /*
1080 * Send Password response.
1081 */
1082
1083 b[0] = 0xfc;
1084 b[1] = 0x20;
1085 put_unaligned_be16(strlen(nd->password), b + 2);
1086 b += 4;
1087 b += strlen(nd->password);
1088 nd->nd_send &= ~(NR_PASSWORD);
1089 }
1090
1091
1092 /*
1093 * Loop over all modules to generate commands, and determine
1094 * the amount of data queued for transmit.
1095 */
1096
1097 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1098 /*
1099 * If this is not the current module, enter a module select
1100 * code in the buffer.
1101 */
1102
1103 if (mod != nd->nd_tx_module)
1104 mbuf = ++b;
1105
1106 /*
1107 * Loop to process one module.
1108 */
1109
1110 maxport = port + 16;
1111
1112 if (maxport > nd->nd_chan_count)
1113 maxport = nd->nd_chan_count;
1114
1115 for (; port < maxport; port++, ch++) {
1116 /*
1117 * Switch based on channel state.
1118 */
1119
1120 switch (ch->ch_state) {
1121 /*
1122 * Send requests when the port is closed, and there
1123 * are no Open, Close or Cancel requests expected.
1124 */
1125
1126 case CS_IDLE:
1127 /*
1128 * Wait until any open error code
1129 * has been delivered to all
1130 * associated ports.
1131 */
1132
1133 if (ch->ch_open_error) {
1134 if (ch->ch_wait_count[ch->ch_otype]) {
1135 work = 1;
1136 break;
1137 }
1138
1139 ch->ch_open_error = 0;
1140 }
1141
1142 /*
1143 * Wait until the channel HANGUP flag is reset
1144 * before sending the first open. We can only
1145 * get to this state after a server disconnect.
1146 */
1147
1148 if ((ch->ch_flag & CH_HANGUP) != 0)
1149 break;
1150
1151 /*
1152 * If recovering from a TCP disconnect, or if
1153 * there is an immediate open pending, send an
1154 * Immediate Open request.
1155 */
1156 if ((ch->ch_flag & CH_PORT_GONE) ||
1157 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1158 b = set_cmd_header(b, port, 10);
1159 *b++ = 0;
1160
1161 ch->ch_state = CS_WAIT_OPEN;
1162 ch->ch_otype = OTYPE_IMMEDIATE;
1163 break;
1164 }
1165
1166 /*
1167 * If there is no Persistent or Incoming Open on the wait
1168 * list in the server, and a thread is waiting for a
1169 * Persistent or Incoming Open, send a Persistent or Incoming
1170 * Open Request.
1171 */
1172 if (ch->ch_otype_waiting == 0) {
1173 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1174 b = set_cmd_header(b, port, 10);
1175 *b++ = 1;
1176
1177 ch->ch_state = CS_WAIT_OPEN;
1178 ch->ch_otype = OTYPE_PERSISTENT;
1179 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1180 b = set_cmd_header(b, port, 10);
1181 *b++ = 2;
1182
1183 ch->ch_state = CS_WAIT_OPEN;
1184 ch->ch_otype = OTYPE_INCOMING;
1185 }
1186 break;
1187 }
1188
1189 /*
1190 * If a Persistent or Incoming Open is pending in
1191 * the server, but there is no longer an open
1192 * thread waiting for it, cancel the request.
1193 */
1194
1195 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1196 b = set_cmd_header(b, port, 10);
1197 *b++ = 4;
1198
1199 ch->ch_state = CS_WAIT_CANCEL;
1200 ch->ch_otype = ch->ch_otype_waiting;
1201 }
1202 break;
1203
1204 /*
1205 * Send port parameter queries.
1206 */
1207 case CS_SEND_QUERY:
1208 /*
1209 * Clear out all FEP state that might remain
1210 * from the last connection.
1211 */
1212
1213 ch->ch_flag |= CH_PARAM;
1214
1215 ch->ch_flag &= ~CH_RX_FLUSH;
1216
1217 ch->ch_expect = 0;
1218
1219 ch->ch_s_tin = 0;
1220 ch->ch_s_tpos = 0;
1221 ch->ch_s_tsize = 0;
1222 ch->ch_s_treq = 0;
1223 ch->ch_s_elast = 0;
1224
1225 ch->ch_s_rin = 0;
1226 ch->ch_s_rwin = 0;
1227 ch->ch_s_rsize = 0;
1228
1229 ch->ch_s_tmax = 0;
1230 ch->ch_s_ttime = 0;
1231 ch->ch_s_rmax = 0;
1232 ch->ch_s_rtime = 0;
1233 ch->ch_s_rlow = 0;
1234 ch->ch_s_rhigh = 0;
1235
1236 ch->ch_s_brate = 0;
1237 ch->ch_s_iflag = 0;
1238 ch->ch_s_cflag = 0;
1239 ch->ch_s_oflag = 0;
1240 ch->ch_s_xflag = 0;
1241
1242 ch->ch_s_mout = 0;
1243 ch->ch_s_mflow = 0;
1244 ch->ch_s_mctrl = 0;
1245 ch->ch_s_xon = 0;
1246 ch->ch_s_xoff = 0;
1247 ch->ch_s_lnext = 0;
1248 ch->ch_s_xxon = 0;
1249 ch->ch_s_xxoff = 0;
1250
1251 /* Send Sequence Request */
1252 b = set_cmd_header(b, port, 14);
1253
1254 /* Configure Event Conditions Packet */
1255 b = set_cmd_header(b, port, 42);
1256 put_unaligned_be16(0x02c0, b);
1257 b += 2;
1258 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1259 DM_DSR | DM_RI | DM_CD);
1260
1261 /* Send Status Request */
1262 b = set_cmd_header(b, port, 16);
1263
1264 /* Send Buffer Request */
1265 b = set_cmd_header(b, port, 20);
1266
1267 /* Send Port Capability Request */
1268 b = set_cmd_header(b, port, 22);
1269
1270 ch->ch_expect = (RR_SEQUENCE |
1271 RR_STATUS |
1272 RR_BUFFER |
1273 RR_CAPABILITY);
1274
1275 ch->ch_state = CS_WAIT_QUERY;
1276
1277 /* Raise modem signals */
1278 b = set_cmd_header(b, port, 44);
1279
1280 if (ch->ch_flag & CH_PORT_GONE)
1281 ch->ch_s_mout = ch->ch_mout;
1282 else
1283 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1284
1285 *b++ = ch->ch_mout;
1286 *b++ = ch->ch_s_mflow = 0;
1287 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1288
1289 if (ch->ch_flag & CH_PORT_GONE)
1290 ch->ch_flag &= ~CH_PORT_GONE;
1291
1292 break;
1293
1294 /*
1295 * Handle normal open and ready mode.
1296 */
1297
1298 case CS_READY:
1299
1300 /*
1301 * If the port is not open, and there are no
1302 * no longer any ports requesting an open,
1303 * then close the port.
1304 */
1305
1306 if (ch->ch_open_count == 0 &&
1307 ch->ch_wait_count[ch->ch_otype] == 0) {
1308 goto send_close;
1309 }
1310
1311 /*
1312 * Process waiting input.
1313 *
1314 * If there is no one to read it, discard the data.
1315 *
1316 * Otherwise if we are not in fastcook mode, or if there is a
1317 * fastcook thread waiting for data, send the data to the
1318 * line discipline.
1319 */
1320 if (ch->ch_rin != ch->ch_rout) {
1321 if (ch->ch_tun.un_open_count == 0 ||
1322 (ch->ch_tun.un_flag & UN_CLOSING) ||
1323 (ch->ch_cflag & CF_CREAD) == 0) {
1324 ch->ch_rout = ch->ch_rin;
1325 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1326 ch->ch_inwait != 0) {
1327 dgrp_input(ch);
1328
1329 if (ch->ch_rin != ch->ch_rout)
1330 work = 1;
1331 }
1332 }
1333
1334 /*
1335 * Handle receive flush, and changes to
1336 * server port parameters.
1337 */
1338
1339 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1340 /*
1341 * If we are in receive flush mode,
1342 * and enough data has gone by, reset
1343 * receive flush mode.
1344 */
1345 if (ch->ch_flag & CH_RX_FLUSH) {
1346 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1347 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1348 ch->ch_flag &= ~CH_RX_FLUSH;
1349 else
1350 work = 1;
1351 }
1352
1353 /*
1354 * Send TMAX, TTIME.
1355 */
1356
1357 if (ch->ch_s_tmax != ch->ch_tmax ||
1358 ch->ch_s_ttime != ch->ch_ttime) {
1359 b = set_cmd_header(b, port, 48);
1360
1361 ch->ch_s_tmax = ch->ch_tmax;
1362 ch->ch_s_ttime = ch->ch_ttime;
1363
1364 put_unaligned_be16(ch->ch_s_tmax,
1365 b);
1366 b += 2;
1367
1368 put_unaligned_be16(ch->ch_s_ttime,
1369 b);
1370 b += 2;
1371 }
1372
1373 /*
1374 * Send RLOW, RHIGH.
1375 */
1376
1377 if (ch->ch_s_rlow != ch->ch_rlow ||
1378 ch->ch_s_rhigh != ch->ch_rhigh) {
1379 b = set_cmd_header(b, port, 45);
1380
1381 ch->ch_s_rlow = ch->ch_rlow;
1382 ch->ch_s_rhigh = ch->ch_rhigh;
1383
1384 put_unaligned_be16(ch->ch_s_rlow,
1385 b);
1386 b += 2;
1387
1388 put_unaligned_be16(ch->ch_s_rhigh,
1389 b);
1390 b += 2;
1391 }
1392
1393 /*
1394 * Send BRATE, CFLAG, IFLAG,
1395 * OFLAG, XFLAG.
1396 */
1397
1398 if (ch->ch_s_brate != ch->ch_brate ||
1399 ch->ch_s_cflag != ch->ch_cflag ||
1400 ch->ch_s_iflag != ch->ch_iflag ||
1401 ch->ch_s_oflag != ch->ch_oflag ||
1402 ch->ch_s_xflag != ch->ch_xflag) {
1403 b = set_cmd_header(b, port, 40);
1404
1405 ch->ch_s_brate = ch->ch_brate;
1406 ch->ch_s_cflag = ch->ch_cflag;
1407 ch->ch_s_iflag = ch->ch_iflag;
1408 ch->ch_s_oflag = ch->ch_oflag;
1409 ch->ch_s_xflag = ch->ch_xflag;
1410
1411 put_unaligned_be16(ch->ch_s_brate,
1412 b);
1413 b += 2;
1414
1415 put_unaligned_be16(ch->ch_s_cflag,
1416 b);
1417 b += 2;
1418
1419 put_unaligned_be16(ch->ch_s_iflag,
1420 b);
1421 b += 2;
1422
1423 put_unaligned_be16(ch->ch_s_oflag,
1424 b);
1425 b += 2;
1426
1427 put_unaligned_be16(ch->ch_s_xflag,
1428 b);
1429 b += 2;
1430 }
1431
1432 /*
1433 * Send MOUT, MFLOW, MCTRL.
1434 */
1435
1436 if (ch->ch_s_mout != ch->ch_mout ||
1437 ch->ch_s_mflow != ch->ch_mflow ||
1438 ch->ch_s_mctrl != ch->ch_mctrl) {
1439 b = set_cmd_header(b, port, 44);
1440
1441 *b++ = ch->ch_s_mout = ch->ch_mout;
1442 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1443 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1444 }
1445
1446 /*
1447 * Send Flow control characters.
1448 */
1449
1450 if (ch->ch_s_xon != ch->ch_xon ||
1451 ch->ch_s_xoff != ch->ch_xoff ||
1452 ch->ch_s_lnext != ch->ch_lnext ||
1453 ch->ch_s_xxon != ch->ch_xxon ||
1454 ch->ch_s_xxoff != ch->ch_xxoff) {
1455 b = set_cmd_header(b, port, 46);
1456
1457 *b++ = ch->ch_s_xon = ch->ch_xon;
1458 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1459 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1460 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1461 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1462 }
1463
1464 /*
1465 * Send RMAX, RTIME.
1466 */
1467
1468 if (ch->ch_s_rmax != ch->ch_rmax ||
1469 ch->ch_s_rtime != ch->ch_rtime) {
1470 b = set_cmd_header(b, port, 47);
1471
1472 ch->ch_s_rmax = ch->ch_rmax;
1473 ch->ch_s_rtime = ch->ch_rtime;
1474
1475 put_unaligned_be16(ch->ch_s_rmax,
1476 b);
1477 b += 2;
1478
1479 put_unaligned_be16(ch->ch_s_rtime,
1480 b);
1481 b += 2;
1482 }
1483
1484 ch->ch_flag &= ~CH_PARAM;
1485 wake_up_interruptible(&ch->ch_flag_wait);
1486 }
1487
1488
1489 /*
1490 * Handle action commands.
1491 */
1492
1493 if (ch->ch_send != 0) {
1494 /* int send = ch->ch_send & ~ch->ch_expect; */
1495 send = ch->ch_send & ~ch->ch_expect;
1496
1497 /* Send character immediate */
1498 if ((send & RR_TX_ICHAR) != 0) {
1499 b = set_cmd_header(b, port, 60);
1500
1501 *b++ = ch->ch_xon;
1502 ch->ch_expect |= RR_TX_ICHAR;
1503 }
1504
1505 /* BREAK request */
1506 if ((send & RR_TX_BREAK) != 0) {
1507 if (ch->ch_break_time != 0) {
1508 b = set_cmd_header(b, port, 61);
1509 put_unaligned_be16(ch->ch_break_time,
1510 b);
1511 b += 2;
1512
1513 ch->ch_expect |= RR_TX_BREAK;
1514 ch->ch_break_time = 0;
1515 } else {
1516 ch->ch_send &= ~RR_TX_BREAK;
1517 ch->ch_flag &= ~CH_TX_BREAK;
1518 wake_up_interruptible(&ch->ch_flag_wait);
1519 }
1520 }
1521
1522 /*
1523 * Flush input/output buffers.
1524 */
1525
1526 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1527 b = set_cmd_header(b, port, 62);
1528
1529 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1530 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1531
1532 if (send & RR_RX_FLUSH) {
1533 ch->ch_flush_seq = nd->nd_seq_in;
1534 ch->ch_flag |= CH_RX_FLUSH;
1535 work = 1;
1536 send_sync = 1;
1537 wanted_sync_port = port;
1538 }
1539
1540 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1541 }
1542
1543 /* Pause input/output */
1544 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1545 b = set_cmd_header(b, port, 63);
1546 *b = 0;
1547
1548 if ((send & RR_TX_STOP) != 0)
1549 *b |= EV_OPU;
1550
1551 if ((send & RR_RX_STOP) != 0)
1552 *b |= EV_IPU;
1553
1554 b++;
1555
1556 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1557 }
1558
1559 /* Start input/output */
1560 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1561 b = set_cmd_header(b, port, 64);
1562 *b = 0;
1563
1564 if ((send & RR_TX_START) != 0)
1565 *b |= EV_OPU | EV_OPS | EV_OPX;
1566
1567 if ((send & RR_RX_START) != 0)
1568 *b |= EV_IPU | EV_IPS;
1569
1570 b++;
1571
1572 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1573 }
1574 }
1575
1576
1577 /*
1578 * Send a window sequence to acknowledge received data.
1579 */
1580
1581 rwin = (ch->ch_s_rin +
1582 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1583
1584 n = (rwin - ch->ch_s_rwin) & 0xffff;
1585
1586 if (n >= RBUF_MAX / 4) {
1587 b[0] = 0xa0 + (port & 0xf);
1588 ch->ch_s_rwin = rwin;
1589 put_unaligned_be16(rwin, b + 1);
1590 b += 3;
1591 }
1592
1593 /*
1594 * If the terminal is waiting on LOW
1595 * water or EMPTY, and the condition
1596 * is now satisfied, call the line
1597 * discipline to put more data in the
1598 * buffer.
1599 */
1600
1601 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1602
1603 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1604 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1605 (n <= TBUF_LOW) :
1606 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1607 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1608
1609 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1610 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1611 tty_wakeup(ch->ch_tun.un_tty);
1612 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1613 }
1614 }
1615
1616 /*
1617 * If the printer is waiting on LOW
1618 * water, TIME, EMPTY or PWAIT, and is
1619 * now ready to put more data in the
1620 * buffer, call the line discipline to
1621 * do the job.
1622 */
1623
1624 /* FIXME: jiffies - ch->ch_waketime can never
1625 be < 0. Someone needs to work out what is
1626 actually intended here */
1627 if (ch->ch_pun.un_open_count &&
1628 (ch->ch_pun.un_flag &
1629 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1630
1631 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1632 (n <= TBUF_LOW) :
1633 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1634 ((jiffies - ch->ch_waketime) >= 0) :
1635 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1636 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1637 ((ch->ch_tun.un_open_count &&
1638 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1639 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1640 : 1
1641 )
1642 )) {
1643 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1644
1645 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1646 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1647 tty_wakeup(ch->ch_pun.un_tty);
1648 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1649
1650 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1651 work = 1;
1652 }
1653 }
1654
1655
1656 /*
1657 * Determine the max number of bytes
1658 * this port can send, including
1659 * packet header overhead.
1660 */
1661
1662 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1663
1664 if (n > t)
1665 n = t;
1666
1667 if (n != 0) {
1668 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1669
1670 tdata[tchan++] = n;
1671 ttotal += n;
1672 }
1673 break;
1674
1675 /*
1676 * Close the port.
1677 */
1678
1679 send_close:
1680 case CS_SEND_CLOSE:
1681 b = set_cmd_header(b, port, 10);
1682 if (ch->ch_otype == OTYPE_IMMEDIATE)
1683 *b++ = 3;
1684 else
1685 *b++ = 4;
1686
1687 ch->ch_state = CS_WAIT_CLOSE;
1688 break;
1689
1690 /*
1691 * Wait for a previous server request.
1692 */
1693
1694 case CS_WAIT_OPEN:
1695 case CS_WAIT_CANCEL:
1696 case CS_WAIT_FAIL:
1697 case CS_WAIT_QUERY:
1698 case CS_WAIT_CLOSE:
1699 break;
1700
1701 default:
1702 pr_info("%s - unexpected channel state (%i)\n",
1703 __func__, ch->ch_state);
1704 }
1705 }
1706
1707 /*
1708 * If a module select code is needed, drop one in. If space
1709 * was reserved for one, but none is needed, recover the space.
1710 */
1711
1712 if (mod != nd->nd_tx_module) {
1713 if (b != mbuf) {
1714 mbuf[-1] = 0xf0 | mod;
1715 nd->nd_tx_module = mod;
1716 } else {
1717 b--;
1718 }
1719 }
1720 }
1721
1722 /*
1723 * Adjust "tmax" so that under worst case conditions we do
1724 * not overflow either the daemon buffer or the internal
1725 * buffer in the loop that follows. Leave a safe area
1726 * of 64 bytes so we start getting asserts before we start
1727 * losing data or clobbering memory.
1728 */
1729
1730 n = UIO_MAX - UIO_BASE;
1731
1732 if (tmax > n)
1733 tmax = n;
1734
1735 tmax -= 64;
1736
1737 tsafe = tmax;
1738
1739 /*
1740 * Allocate space for 5 Module Selects, 1 Sequence Request,
1741 * and 1 Set TREQ for each active channel.
1742 */
1743
1744 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1745
1746 /*
1747 * Further reduce "tmax" to the available transmit credit.
1748 * Note that this is a soft constraint; The transmit credit
1749 * can go negative for a time and then recover.
1750 */
1751
1752 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1753
1754 if (tmax > n)
1755 tmax = n;
1756
1757 /*
1758 * Finally reduce tmax by the number of bytes already in
1759 * the buffer.
1760 */
1761
1762 tmax -= b - buf;
1763
1764 /*
1765 * Suspend data transmit unless every ready channel can send
1766 * at least 1 character.
1767 */
1768 if (tmax < 2 * nd->nd_chan_count) {
1769 tsend = 1;
1770
1771 } else if (tchan > 1 && ttotal > tmax) {
1772
1773 /*
1774 * If transmit is limited by the credit budget, find the
1775 * largest number of characters we can send without driving
1776 * the credit negative.
1777 */
1778
1779 long tm = tmax;
1780 int tc = tchan;
1781 int try;
1782
1783 tsend = tm / tc;
1784
1785 for (try = 0; try < 3; try++) {
1786 int i;
1787 int c = 0;
1788
1789 for (i = 0; i < tc; i++) {
1790 if (tsend < tdata[i])
1791 tdata[c++] = tdata[i];
1792 else
1793 tm -= tdata[i];
1794 }
1795
1796 if (c == tc)
1797 break;
1798
1799 tsend = tm / c;
1800
1801 if (c == 1)
1802 break;
1803
1804 tc = c;
1805 }
1806
1807 tsend = tm / nd->nd_chan_count;
1808
1809 if (tsend < 2)
1810 tsend = 1;
1811
1812 } else {
1813 /*
1814 * If no budgetary constraints, or only one channel ready
1815 * to send, set the character limit to the remaining
1816 * buffer size.
1817 */
1818
1819 tsend = tmax;
1820 }
1821
1822 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
1823
1824 /*
1825 * Loop over all channels, sending queued data.
1826 */
1827
1828 port = 0;
1829 ch = nd->nd_chan;
1830 used_buffer = tmax;
1831
1832 for (mod = 0; port < nd->nd_chan_count; mod++) {
1833 /*
1834 * If this is not the current module, enter a module select
1835 * code in the buffer.
1836 */
1837
1838 if (mod != nd->nd_tx_module)
1839 mbuf = ++b;
1840
1841 /*
1842 * Loop to process one module.
1843 */
1844
1845 maxport = port + 16;
1846
1847 if (maxport > nd->nd_chan_count)
1848 maxport = nd->nd_chan_count;
1849
1850 for (; port < maxport; port++, ch++) {
1851 if (ch->ch_state != CS_READY)
1852 continue;
1853
1854 lastport = port;
1855
1856 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1857
1858 /*
1859 * If there is data that can be sent, send it.
1860 */
1861
1862 if (n != 0 && used_buffer > 0) {
1863 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1864
1865 if (n > t)
1866 n = t;
1867
1868 if (n > tsend) {
1869 work = 1;
1870 n = tsend;
1871 }
1872
1873 if (n > used_buffer) {
1874 work = 1;
1875 n = used_buffer;
1876 }
1877
1878 if (n <= 0)
1879 continue;
1880
1881 /*
1882 * Create the correct size transmit header,
1883 * depending on the amount of data to transmit.
1884 */
1885
1886 if (n <= 8) {
1887
1888 b[0] = ((n - 1) << 4) + (port & 0xf);
1889 b += 1;
1890
1891 } else if (n <= 255) {
1892
1893 b[0] = 0x80 + (port & 0xf);
1894 b[1] = n;
1895 b += 2;
1896
1897 } else {
1898
1899 b[0] = 0x90 + (port & 0xf);
1900 put_unaligned_be16(n, b + 1);
1901 b += 3;
1902 }
1903
1904 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1905
1906 /*
1907 * Copy transmit data to the packet.
1908 */
1909
1910 t = TBUF_MAX - ch->ch_tout;
1911
1912 if (n >= t) {
1913 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1914 b += t;
1915 n -= t;
1916 used_buffer -= t;
1917 ch->ch_tout = 0;
1918 }
1919
1920 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1921 b += n;
1922 used_buffer -= n;
1923 ch->ch_tout += n;
1924 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1925 }
1926
1927 /*
1928 * Wake any terminal unit process waiting in the
1929 * dgrp_write routine for low water.
1930 */
1931
1932 if (n > TBUF_LOW)
1933 continue;
1934
1935 if ((ch->ch_flag & CH_LOW) != 0) {
1936 ch->ch_flag &= ~CH_LOW;
1937 wake_up_interruptible(&ch->ch_flag_wait);
1938 }
1939
1940 /* selwakeup tty_sel */
1941 if (ch->ch_tun.un_open_count) {
1942 struct tty_struct *tty = (ch->ch_tun.un_tty);
1943
1944 if (waitqueue_active(&tty->write_wait))
1945 wake_up_interruptible(&tty->write_wait);
1946
1947 tty_wakeup(tty);
1948 }
1949
1950 if (ch->ch_pun.un_open_count) {
1951 struct tty_struct *tty = (ch->ch_pun.un_tty);
1952
1953 if (waitqueue_active(&tty->write_wait))
1954 wake_up_interruptible(&tty->write_wait);
1955
1956 tty_wakeup(tty);
1957 }
1958
1959 /*
1960 * Do EMPTY processing.
1961 */
1962
1963 if (n != 0)
1964 continue;
1965
1966 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
1967 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
1968 /*
1969 * If there is still data in the server, ask the server
1970 * to notify us when its all gone.
1971 */
1972
1973 if (ch->ch_s_treq != ch->ch_s_tin) {
1974 b = set_cmd_header(b, port, 43);
1975
1976 ch->ch_s_treq = ch->ch_s_tin;
1977 put_unaligned_be16(ch->ch_s_treq,
1978 b);
1979 b += 2;
1980 }
1981
1982 /*
1983 * If there is a thread waiting for buffer empty,
1984 * and we are truly empty, wake the thread.
1985 */
1986
1987 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
1988 (ch->ch_send & RR_TX_BREAK) == 0) {
1989 ch->ch_flag &= ~CH_EMPTY;
1990
1991 wake_up_interruptible(&ch->ch_flag_wait);
1992 }
1993 }
1994 }
1995
1996 /*
1997 * If a module select code is needed, drop one in. If space
1998 * was reserved for one, but none is needed, recover the space.
1999 */
2000
2001 if (mod != nd->nd_tx_module) {
2002 if (b != mbuf) {
2003 mbuf[-1] = 0xf0 | mod;
2004 nd->nd_tx_module = mod;
2005 } else {
2006 b--;
2007 }
2008 }
2009 }
2010
2011 /*
2012 * Send a synchronization sequence associated with the last open
2013 * channel that sent data, and remember the time when the data was
2014 * sent.
2015 */
2016
2017 in = nd->nd_seq_in;
2018
2019 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
2020 u8 *bb = b;
2021
2022 /*
2023 	 * Attempt to use the port that really wanted the sync.
2024 * This gets around a race condition where the "lastport" is in
2025 * the middle of the close() routine, and by the time we
2026 * send this command, it will have already acked the close, and
2027 * thus not send the sync response.
2028 */
2029 if (wanted_sync_port >= 0)
2030 lastport = wanted_sync_port;
2031 /*
2032 * Set a flag just in case the port is in the middle of a close,
2033 	 * it will not be permitted to actually close until we get a
2034 * sync response, and clear the flag there.
2035 */
2036 ch = nd->nd_chan + lastport;
2037 ch->ch_flag |= CH_WAITING_SYNC;
2038
2039 mod = lastport >> 4;
2040
2041 if (mod != nd->nd_tx_module) {
2042 bb[0] = 0xf0 + mod;
2043 bb += 1;
2044
2045 nd->nd_tx_module = mod;
2046 }
2047
2048 bb = set_cmd_header(bb, lastport, 12);
2049 *bb++ = in;
2050
2051 nd->nd_seq_size[in] = bb - buf;
2052 nd->nd_seq_time[in] = jiffies;
2053
2054 if (++in >= SEQ_MAX)
2055 in = 0;
2056
2057 if (in != nd->nd_seq_out) {
2058 b = bb;
2059 nd->nd_seq_in = in;
2060 nd->nd_unack += b - buf;
2061 }
2062 }
2063
2064 /*
2065 * If there are no open ports, a sync cannot be sent.
2066 * There is nothing left to wait for anyway, so wake any
2067 * thread waiting for an acknowledgement.
2068 */
2069
2070 else if (nd->nd_seq_wait[in] != 0) {
2071 nd->nd_seq_wait[in] = 0;
2072
2073 wake_up_interruptible(&nd->nd_seq_wque[in]);
2074 }
2075
2076 /*
2077 * If there is no traffic for an interval of IDLE_MAX, then
2078 * send a single byte packet.
2079 */
2080
2081 if (b != buf) {
2082 nd->nd_tx_time = jiffies;
2083 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2084 *b++ = 0xf0 | nd->nd_tx_module;
2085 nd->nd_tx_time = jiffies;
2086 }
2087
2088 n = b - buf;
2089
2090 if (n >= tsafe)
2091 pr_info("%s - n(%i) >= tsafe(%i)\n",
2092 __func__, n, tsafe);
2093
2094 if (tsend < 0)
2095 dgrp_dump(buf, n);
2096
2097 nd->nd_tx_work = work;
2098
2099 return n;
2100 }
2101
2102 /*
2103  * dgrp_net_read()
2104  * Data to be sent TO the PortServer from the "async." half of the driver.
 *
 * Invoked when the daemon reads the "net" device.  Builds the next
 * outbound RealPort protocol packet into the node's I/O buffer
 * (according to the node state machine) and copies it to the daemon's
 * userspace buffer for forwarding to the PortServer.
 *
 * Returns the number of bytes produced (possibly 0), or a negative
 * errno: -ENXIO if no node is attached to the file, -EINVAL if the
 * daemon's buffer cannot hold a minimum-sized packet, or -EFAULT if
 * the copy to userspace fails.
2105  */
2106 static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2107 			     loff_t *ppos)
2108 {
2109 	struct nd_struct *nd;
2110 	long n;
2111 	u8 *local_buf;
2112 	u8 *b;
2113 	ssize_t rtn;
2114 
2115 	/*
2116 	 * Get the node pointer, and quit if it doesn't exist.
2117 	 */
2118 	nd = (struct nd_struct *)(file->private_data);
2119 	if (!nd)
2120 		return -ENXIO;
2121 
	/* The daemon must offer room for at least a minimum-sized packet. */
2122 	if (count < UIO_MIN)
2123 		return -EINVAL;
2124 
2125 	/*
2126 	 * Only one read/write operation may be in progress at
2127 	 * any given time.
2128 	 */
2129 
2130 	/*
2131 	 * Grab the NET lock.  (Uninterruptible; released at "done" below.)
2132 	 */
2133 	down(&nd->nd_net_semaphore);
2134 
2135 	nd->nd_read_count++;
2136 
	/* NOTE(review): nd_tx_ready appears to gate the poll/select "data
	 * ready" condition; it is cleared here because this read consumes
	 * the pending transmit work -- confirm against dgrp_net_select(). */
2137 	nd->nd_tx_ready = 0;
2138 
2139 	/*
2140 	 * Determine the effective size of the buffer.
2141 	 */
2142 
	/* Leftover inbound bytes should never exceed the reserved header
	 * area; warn (rate-limited) if the invariant is violated. */
2143 	if (nd->nd_remain > UIO_BASE)
2144 		pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2145 				    __func__, nd->nd_remain);
2146 
	/* Packet bytes are assembled after the UIO_BASE reserved area. */
2147 	b = local_buf = nd->nd_iobuf + UIO_BASE;
2148 
2149 	/*
2150 	 * Generate data according to the node state.
2151 	 */
2152 
2153 	switch (nd->nd_state) {
2154 	/*
2155 	 * Initialize the connection: queue the three identity/capability
2156 	 * queries and move to the wait-for-responses state.
2157 	 */
2158 
2159 	case NS_IDLE:
2160 		if (nd->nd_mon_buf)
2161 			dgrp_monitor_reset(nd);
2162 
2163 		/*
2164 		 * Request a Product ID Packet.
2165 		 */
2166 
2167 		b[0] = 0xfb;
2168 		b[1] = 0x01;
2169 		b += 2;
2170 
2171 		nd->nd_expect |= NR_IDENT;
2172 
2173 		/*
2174 		 * Request a Server Capability ID Response.
2175 		 */
2176 
2177 		b[0] = 0xfb;
2178 		b[1] = 0x02;
2179 		b += 2;
2180 
2181 		nd->nd_expect |= NR_CAPABILITY;
2182 
2183 		/*
2184 		 * Request a Server VPD Response.
2185 		 */
2186 
2187 		b[0] = 0xfb;
2188 		b[1] = 0x18;
2189 		b += 2;
2190 
2191 		nd->nd_expect |= NR_VPD;
2192 
2193 		nd->nd_state = NS_WAIT_QUERY;
2194 		break;
2195 
2196 	/*
2197 	 * We do serious communication with the server only in
2198 	 * the READY state.
2199 	 */
2200 
2201 	case NS_READY:
		/* dgrp_send() fills local_buf and returns the byte count;
		 * advance the write pointer past what it generated. */
2202 		b = dgrp_send(nd, count) + local_buf;
2203 		break;
2204 
2205 	/*
2206 	 * Send off an error after receiving a bogus message
2207 	 * from the server.
2208 	 */
2209 
2210 	case NS_SEND_ERROR:
2211 		n = strlen(nd->nd_error);
2212 
		/* Error packet: 0xff marker, one-byte length, message text.
		 * Assumes strlen(nd->nd_error) <= 255 so it fits the
		 * one-byte length field -- TODO confirm at the setter. */
2213 		b[0] = 0xff;
2214 		b[1] = n;
2215 		memcpy(b + 2, nd->nd_error, n);
2216 		b += 2 + n;
2217 
2218 		dgrp_net_idle(nd);
2219 		/*
2220 		 * Set the active port count to zero.
2221 		 */
2222 		dgrp_chan_count(nd, 0);
2223 		break;
2224 
2225 	default:
		/* Other states (e.g. waiting for query responses) generate
		 * no outbound data on this pass. */
2226 		break;
2227 	}
2228 
2229 	n = b - local_buf;
2230 
2231 	if (n != 0) {
2232 		nd->nd_send_count++;
2233 
		/* Account for the payload plus per-packet link header. */
2234 		nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2235 		nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2236 	}
2237 
	/* copy_to_user() returns the number of bytes NOT copied; any
	 * shortfall is reported to the daemon as -EFAULT.  Note that the
	 * byte/charge accounting above is not rolled back on failure. */
2238 	rtn = copy_to_user((void __user *)buf, local_buf, n);
2239 	if (rtn) {
2240 		rtn = -EFAULT;
2241 		goto done;
2242 	}
2243 
2244 	*ppos += n;
2245 
2246 	rtn = n;
2247 
	/* Mirror the outbound traffic to the monitor device, if open. */
2248 	if (nd->nd_mon_buf)
2249 		dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2250 
2251 	/*
2252 	 * Release the NET lock.
2253 	 */
2254 done:
2255 	up(&nd->nd_net_semaphore);
2256 
2257 	return rtn;
2258 }
2258
2259 /**
2260 * dgrp_receive() -- decode data packets received from the remote PortServer.
2261 * @nd: pointer to a node structure
2262 */
2263 static void dgrp_receive(struct nd_struct *nd)
2264 {
2265 struct ch_struct *ch;
2266 u8 *buf;
2267 u8 *b;
2268 u8 *dbuf;
2269 char *error;
2270 long port;
2271 long dlen;
2272 long plen;
2273 long remain;
2274 long n;
2275 long mlast;
2276 long elast;
2277 long mstat;
2278 long estat;
2279
2280 char ID[3];
2281
2282 nd->nd_tx_time = jiffies;
2283
2284 ID_TO_CHAR(nd->nd_ID, ID);
2285
2286 b = buf = nd->nd_iobuf;
2287 remain = nd->nd_remain;
2288
2289 /*
2290 * Loop to process Realport protocol packets.
2291 */
2292
2293 while (remain > 0) {
2294 int n0 = b[0] >> 4;
2295 int n1 = b[0] & 0x0f;
2296
2297 if (n0 <= 12) {
2298 port = (nd->nd_rx_module << 4) + n1;
2299
2300 if (port >= nd->nd_chan_count) {
2301 error = "Improper Port Number";
2302 goto prot_error;
2303 }
2304
2305 ch = nd->nd_chan + port;
2306 } else {
2307 port = -1;
2308 ch = NULL;
2309 }
2310
2311 /*
2312 * Process by major packet type.
2313 */
2314
2315 switch (n0) {
2316
2317 /*
2318 * Process 1-byte header data packet.
2319 */
2320
2321 case 0:
2322 case 1:
2323 case 2:
2324 case 3:
2325 case 4:
2326 case 5:
2327 case 6:
2328 case 7:
2329 dlen = n0 + 1;
2330 plen = dlen + 1;
2331
2332 dbuf = b + 1;
2333 goto data;
2334
2335 /*
2336 * Process 2-byte header data packet.
2337 */
2338
2339 case 8:
2340 if (remain < 3)
2341 goto done;
2342
2343 dlen = b[1];
2344 plen = dlen + 2;
2345
2346 dbuf = b + 2;
2347 goto data;
2348
2349 /*
2350 * Process 3-byte header data packet.
2351 */
2352
2353 case 9:
2354 if (remain < 4)
2355 goto done;
2356
2357 dlen = get_unaligned_be16(b + 1);
2358 plen = dlen + 3;
2359
2360 dbuf = b + 3;
2361
2362 /*
2363 * Common packet handling code.
2364 */
2365
2366 data:
2367 nd->nd_tx_work = 1;
2368
2369 /*
2370 * Otherwise data should appear only when we are
2371 * in the CS_READY state.
2372 */
2373
2374 if (ch->ch_state < CS_READY) {
2375 error = "Data received before RWIN established";
2376 goto prot_error;
2377 }
2378
2379 /*
2380 * Assure that the data received is within the
2381 * allowable window.
2382 */
2383
2384 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2385
2386 if (dlen > n) {
2387 error = "Receive data overrun";
2388 goto prot_error;
2389 }
2390
2391 /*
2392 * If we received 3 or less characters,
2393 * assume it is a human typing, and set RTIME
2394 * to 10 milliseconds.
2395 *
2396 * If we receive 10 or more characters,
2397 * assume its not a human typing, and set RTIME
2398 * to 100 milliseconds.
2399 */
2400
2401 if (ch->ch_edelay != DGRP_RTIME) {
2402 if (ch->ch_rtime != ch->ch_edelay) {
2403 ch->ch_rtime = ch->ch_edelay;
2404 ch->ch_flag |= CH_PARAM;
2405 }
2406 } else if (dlen <= 3) {
2407 if (ch->ch_rtime != 10) {
2408 ch->ch_rtime = 10;
2409 ch->ch_flag |= CH_PARAM;
2410 }
2411 } else {
2412 if (ch->ch_rtime != DGRP_RTIME) {
2413 ch->ch_rtime = DGRP_RTIME;
2414 ch->ch_flag |= CH_PARAM;
2415 }
2416 }
2417
2418 /*
2419 * If a portion of the packet is outside the
2420 * buffer, shorten the effective length of the
2421 * data packet to be the amount of data received.
2422 */
2423
2424 if (remain < plen)
2425 dlen -= plen - remain;
2426
2427 /*
2428 * Detect if receive flush is now complete.
2429 */
2430
2431 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2432 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2433 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2434 ch->ch_flag &= ~CH_RX_FLUSH;
2435 }
2436
2437 /*
2438 * If we are ready to receive, move the data into
2439 * the receive buffer.
2440 */
2441
2442 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2443
2444 if (ch->ch_state == CS_READY &&
2445 (ch->ch_tun.un_open_count != 0) &&
2446 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2447 (ch->ch_cflag & CF_CREAD) != 0 &&
2448 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2449 (ch->ch_send & RR_RX_FLUSH) == 0) {
2450
2451 if (ch->ch_rin + dlen >= RBUF_MAX) {
2452 n = RBUF_MAX - ch->ch_rin;
2453
2454 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2455
2456 ch->ch_rin = 0;
2457 dbuf += n;
2458 dlen -= n;
2459 }
2460
2461 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2462
2463 ch->ch_rin += dlen;
2464
2465
2466 /*
2467 * If we are not in fastcook mode, or
2468 * if there is a fastcook thread
2469 * waiting for data, send the data to
2470 * the line discipline.
2471 */
2472
2473 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2474 ch->ch_inwait != 0) {
2475 dgrp_input(ch);
2476 }
2477
2478 /*
2479 * If there is a read thread waiting
2480 * in select, and we are in fastcook
2481 * mode, wake him up.
2482 */
2483
2484 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2485 (ch->ch_flag & CH_FAST_READ) != 0)
2486 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2487
2488 /*
2489 * Wake any thread waiting in the
2490 * fastcook loop.
2491 */
2492
2493 if ((ch->ch_flag & CH_INPUT) != 0) {
2494 ch->ch_flag &= ~CH_INPUT;
2495
2496 wake_up_interruptible(&ch->ch_flag_wait);
2497 }
2498 }
2499
2500 /*
2501 * Fabricate and insert a data packet header to
2502 			 * precede the remaining data when it comes in.
2503 */
2504
2505 if (remain < plen) {
2506 dlen = plen - remain;
2507 b = buf;
2508
2509 b[0] = 0x90 + n1;
2510 put_unaligned_be16(dlen, b + 1);
2511
2512 remain = 3;
2513 goto done;
2514 }
2515 break;
2516
2517 /*
2518 * Handle Window Sequence packets.
2519 */
2520
2521 case 10:
2522 plen = 3;
2523 if (remain < plen)
2524 goto done;
2525
2526 nd->nd_tx_work = 1;
2527
2528 {
2529 ushort tpos = get_unaligned_be16(b + 1);
2530
2531 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2532 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2533 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
2534
2535 if (ch->ch_state < CS_READY || ack > unack) {
2536 error = "Improper Window Sequence";
2537 goto prot_error;
2538 }
2539
2540 ch->ch_s_tpos = tpos;
2541
2542 if (notify <= ack)
2543 ch->ch_s_treq = tpos;
2544 }
2545 break;
2546
2547 /*
2548 * Handle Command response packets.
2549 */
2550
2551 case 11:
2552
2553 /*
2554 * RealPort engine fix - 03/11/2004
2555 *
2556 * This check did not used to be here.
2557 *
2558 * We were using b[1] without verifying that the data
2559 * is actually there and valid. On a split packet, it
2560 * might not be yet.
2561 *
2562 * NOTE: I have never actually seen the failure happen
2563 * under Linux, but since I have seen it occur
2564 * under both Solaris and HP-UX, the assumption
2565 * is that it *could* happen here as well...
2566 */
2567 if (remain < 2)
2568 goto done;
2569
2570
2571 switch (b[1]) {
2572
2573 /*
2574 * Handle Open Response.
2575 */
2576
2577 case 11:
2578 plen = 6;
2579 if (remain < plen)
2580 goto done;
2581
2582 nd->nd_tx_work = 1;
2583
2584 {
2585 int req = b[2];
2586 int resp = b[3];
2587 port = get_unaligned_be16(b + 4);
2588
2589 if (port >= nd->nd_chan_count) {
2590 error = "Open channel number out of range";
2591 goto prot_error;
2592 }
2593
2594 ch = nd->nd_chan + port;
2595
2596 /*
2597 * How we handle an open response depends primarily
2598 * on our current channel state.
2599 */
2600
2601 switch (ch->ch_state) {
2602 case CS_IDLE:
2603
2604 /*
2605 * Handle a delayed open.
2606 */
2607
2608 if (ch->ch_otype_waiting != 0 &&
2609 req == ch->ch_otype_waiting &&
2610 resp == 0) {
2611 ch->ch_otype = req;
2612 ch->ch_otype_waiting = 0;
2613 ch->ch_state = CS_SEND_QUERY;
2614 break;
2615 }
2616 goto open_error;
2617
2618 case CS_WAIT_OPEN:
2619
2620 /*
2621 * Handle the open response.
2622 */
2623
2624 if (req == ch->ch_otype) {
2625 switch (resp) {
2626
2627 /*
2628 * On successful response, open the
2629 * port and proceed normally.
2630 */
2631
2632 case 0:
2633 ch->ch_state = CS_SEND_QUERY;
2634 break;
2635
2636 /*
2637 * On a busy response to a persistent open,
2638 * remember that the open is pending.
2639 */
2640
2641 case 1:
2642 case 2:
2643 if (req != OTYPE_IMMEDIATE) {
2644 ch->ch_otype_waiting = req;
2645 ch->ch_state = CS_IDLE;
2646 break;
2647 }
2648
2649 /*
2650 * Otherwise the server open failed. If
2651 * the Unix port is open, hang it up.
2652 */
2653
2654 default:
2655 if (ch->ch_open_count != 0) {
2656 ch->ch_flag |= CH_HANGUP;
2657 dgrp_carrier(ch);
2658 ch->ch_state = CS_IDLE;
2659 break;
2660 }
2661
2662 ch->ch_open_error = resp;
2663 ch->ch_state = CS_IDLE;
2664
2665 wake_up_interruptible(&ch->ch_flag_wait);
2666 }
2667 break;
2668 }
2669
2670 /*
2671 * Handle delayed response arrival preceding
2672 * the open response we are waiting for.
2673 */
2674
2675 if (ch->ch_otype_waiting != 0 &&
2676 req == ch->ch_otype_waiting &&
2677 resp == 0) {
2678 ch->ch_otype = ch->ch_otype_waiting;
2679 ch->ch_otype_waiting = 0;
2680 ch->ch_state = CS_WAIT_FAIL;
2681 break;
2682 }
2683 goto open_error;
2684
2685
2686 case CS_WAIT_FAIL:
2687
2688 /*
2689 * Handle response to immediate open arriving
2690 * after a delayed open success.
2691 */
2692
2693 if (req == OTYPE_IMMEDIATE) {
2694 ch->ch_state = CS_SEND_QUERY;
2695 break;
2696 }
2697 goto open_error;
2698
2699
2700 case CS_WAIT_CANCEL:
2701 /*
2702 * Handle delayed open response arriving before
2703 * the cancel response.
2704 */
2705
2706 if (req == ch->ch_otype_waiting &&
2707 resp == 0) {
2708 ch->ch_otype_waiting = 0;
2709 break;
2710 }
2711
2712 /*
2713 * Handle cancel response.
2714 */
2715
2716 if (req == 4 && resp == 0) {
2717 ch->ch_otype_waiting = 0;
2718 ch->ch_state = CS_IDLE;
2719 break;
2720 }
2721 goto open_error;
2722
2723
2724 case CS_WAIT_CLOSE:
2725 /*
2726 * Handle a successful response to a port
2727 * close.
2728 */
2729
2730 if (req >= 3) {
2731 ch->ch_state = CS_IDLE;
2732 break;
2733 }
2734 goto open_error;
2735
2736 open_error:
2737 default:
2738 {
2739 error = "Improper Open Response";
2740 goto prot_error;
2741 }
2742 }
2743 }
2744 break;
2745
2746 /*
2747 * Handle Synchronize Response.
2748 */
2749
2750 case 13:
2751 plen = 3;
2752 if (remain < plen)
2753 goto done;
2754 {
2755 int seq = b[2];
2756 int s;
2757
2758 /*
2759 * If channel was waiting for this sync response,
2760 * unset the flag, and wake up anyone waiting
2761 * on the event.
2762 */
2763 if (ch->ch_flag & CH_WAITING_SYNC) {
2764 ch->ch_flag &= ~(CH_WAITING_SYNC);
2765 wake_up_interruptible(&ch->ch_flag_wait);
2766 }
2767
2768 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2769 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2770 break;
2771 }
2772
2773 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2774 if (nd->nd_seq_wait[s] != 0) {
2775 nd->nd_seq_wait[s] = 0;
2776
2777 wake_up_interruptible(&nd->nd_seq_wque[s]);
2778 }
2779
2780 nd->nd_unack -= nd->nd_seq_size[s];
2781
2782 if (s == seq)
2783 break;
2784 }
2785
2786 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2787 }
2788 break;
2789
2790 /*
2791 * Handle Sequence Response.
2792 */
2793
2794 case 15:
2795 plen = 6;
2796 if (remain < plen)
2797 goto done;
2798
2799 {
2800 /* Record that we have received the Sequence
2801 * Response, but we aren't interested in the
2802 * sequence numbers. We were using RIN like it
2803 * was ROUT and that was causing problems,
2804 * fixed 7-13-2001 David Fries. See comment in
2805 * drp.h for ch_s_rin variable.
2806 int rin = get_unaligned_be16(b + 2);
2807 int tpos = get_unaligned_be16(b + 4);
2808 */
2809
2810 ch->ch_send &= ~RR_SEQUENCE;
2811 ch->ch_expect &= ~RR_SEQUENCE;
2812 }
2813 goto check_query;
2814
2815 /*
2816 * Handle Status Response.
2817 */
2818
2819 case 17:
2820 plen = 5;
2821 if (remain < plen)
2822 goto done;
2823
2824 {
2825 ch->ch_s_elast = get_unaligned_be16(b + 2);
2826 ch->ch_s_mlast = b[4];
2827
2828 ch->ch_expect &= ~RR_STATUS;
2829 ch->ch_send &= ~RR_STATUS;
2830
2831 /*
2832 * CH_PHYS_CD is cleared because something _could_ be
2833 * waiting for the initial sense of carrier... and if
2834 * carrier is high immediately, we want to be sure to
2835 * wake them as soon as possible.
2836 */
2837 ch->ch_flag &= ~CH_PHYS_CD;
2838
2839 dgrp_carrier(ch);
2840 }
2841 goto check_query;
2842
2843 /*
2844 * Handle Line Error Response.
2845 */
2846
2847 case 19:
2848 plen = 14;
2849 if (remain < plen)
2850 goto done;
2851
2852 break;
2853
2854 /*
2855 * Handle Buffer Response.
2856 */
2857
2858 case 21:
2859 plen = 6;
2860 if (remain < plen)
2861 goto done;
2862
2863 {
2864 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2865 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2866
2867 ch->ch_send &= ~RR_BUFFER;
2868 ch->ch_expect &= ~RR_BUFFER;
2869 }
2870 goto check_query;
2871
2872 /*
2873 * Handle Port Capability Response.
2874 */
2875
2876 case 23:
2877 plen = 32;
2878 if (remain < plen)
2879 goto done;
2880
2881 {
2882 ch->ch_send &= ~RR_CAPABILITY;
2883 ch->ch_expect &= ~RR_CAPABILITY;
2884 }
2885
2886 /*
2887 * When all queries are complete, set those parameters
2888 * derived from the query results, then transition
2889 * to the READY state.
2890 */
2891
2892 check_query:
2893 if (ch->ch_state == CS_WAIT_QUERY &&
2894 (ch->ch_expect & (RR_SEQUENCE |
2895 RR_STATUS |
2896 RR_BUFFER |
2897 RR_CAPABILITY)) == 0) {
2898 ch->ch_tmax = ch->ch_s_tsize / 4;
2899
2900 if (ch->ch_edelay == DGRP_TTIME)
2901 ch->ch_ttime = DGRP_TTIME;
2902 else
2903 ch->ch_ttime = ch->ch_edelay;
2904
2905 ch->ch_rmax = ch->ch_s_rsize / 4;
2906
2907 if (ch->ch_edelay == DGRP_RTIME)
2908 ch->ch_rtime = DGRP_RTIME;
2909 else
2910 ch->ch_rtime = ch->ch_edelay;
2911
2912 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2913 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
2914
2915 ch->ch_state = CS_READY;
2916
2917 nd->nd_tx_work = 1;
2918 wake_up_interruptible(&ch->ch_flag_wait);
2919
2920 }
2921 break;
2922
2923 default:
2924 goto decode_error;
2925 }
2926 break;
2927
2928 /*
2929 * Handle Events.
2930 */
2931
2932 case 12:
2933 plen = 4;
2934 if (remain < plen)
2935 goto done;
2936
2937 mlast = ch->ch_s_mlast;
2938 elast = ch->ch_s_elast;
2939
2940 mstat = ch->ch_s_mlast = b[1];
2941 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2942
2943 /*
2944 * Handle modem changes.
2945 */
2946
2947 if (((mstat ^ mlast) & DM_CD) != 0)
2948 dgrp_carrier(ch);
2949
2950
2951 /*
2952 * Handle received break.
2953 */
2954
2955 if ((estat & ~elast & EV_RXB) != 0 &&
2956 (ch->ch_tun.un_open_count != 0) &&
2957 I_BRKINT(ch->ch_tun.un_tty) &&
2958 !(I_IGNBRK(ch->ch_tun.un_tty))) {
2959
2960 tty_buffer_request_room(&ch->port, 1);
2961 tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
2962 tty_flip_buffer_push(&ch->port);
2963
2964 }
2965
2966 /*
2967 * On transmit break complete, if more break traffic
2968 * is waiting then send it. Otherwise wake any threads
2969 * waiting for transmitter empty.
2970 */
2971
2972 if ((~estat & elast & EV_TXB) != 0 &&
2973 (ch->ch_expect & RR_TX_BREAK) != 0) {
2974
2975 nd->nd_tx_work = 1;
2976
2977 ch->ch_expect &= ~RR_TX_BREAK;
2978
2979 if (ch->ch_break_time != 0) {
2980 ch->ch_send |= RR_TX_BREAK;
2981 } else {
2982 ch->ch_send &= ~RR_TX_BREAK;
2983 ch->ch_flag &= ~CH_TX_BREAK;
2984 wake_up_interruptible(&ch->ch_flag_wait);
2985 }
2986 }
2987 break;
2988
2989 case 13:
2990 case 14:
2991 error = "Unrecognized command";
2992 goto prot_error;
2993
2994 /*
2995 * Decode Special Codes.
2996 */
2997
2998 case 15:
2999 switch (n1) {
3000 /*
3001 * One byte module select.
3002 */
3003
3004 case 0:
3005 case 1:
3006 case 2:
3007 case 3:
3008 case 4:
3009 case 5:
3010 case 6:
3011 case 7:
3012 plen = 1;
3013 nd->nd_rx_module = n1;
3014 break;
3015
3016 /*
3017 * Two byte module select.
3018 */
3019
3020 case 8:
3021 plen = 2;
3022 if (remain < plen)
3023 goto done;
3024
3025 nd->nd_rx_module = b[1];
3026 break;
3027
3028 /*
3029 * ID Request packet.
3030 */
3031
3032 case 11:
3033 if (remain < 4)
3034 goto done;
3035
3036 plen = get_unaligned_be16(b + 2);
3037
3038 if (plen < 12 || plen > 1000) {
3039 error = "Response Packet length error";
3040 goto prot_error;
3041 }
3042
3043 nd->nd_tx_work = 1;
3044
3045 switch (b[1]) {
3046 /*
3047 * Echo packet.
3048 */
3049
3050 case 0:
3051 nd->nd_send |= NR_ECHO;
3052 break;
3053
3054 /*
3055 * ID Response packet.
3056 */
3057
3058 case 1:
3059 nd->nd_send |= NR_IDENT;
3060 break;
3061
3062 /*
3063 * ID Response packet.
3064 */
3065
3066 case 32:
3067 nd->nd_send |= NR_PASSWORD;
3068 break;
3069
3070 }
3071 break;
3072
3073 /*
3074 * Various node-level response packets.
3075 */
3076
3077 case 12:
3078 if (remain < 4)
3079 goto done;
3080
3081 plen = get_unaligned_be16(b + 2);
3082
3083 if (plen < 4 || plen > 1000) {
3084 error = "Response Packet length error";
3085 goto prot_error;
3086 }
3087
3088 nd->nd_tx_work = 1;
3089
3090 switch (b[1]) {
3091 /*
3092 * Echo packet.
3093 */
3094
3095 case 0:
3096 nd->nd_expect &= ~NR_ECHO;
3097 break;
3098
3099 /*
3100 * Product Response Packet.
3101 */
3102
3103 case 1:
3104 {
3105 int desclen;
3106
3107 nd->nd_hw_ver = (b[8] << 8) | b[9];
3108 nd->nd_sw_ver = (b[10] << 8) | b[11];
3109 nd->nd_hw_id = b[6];
3110 desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
3111 plen - 12;
3112
3113 if (desclen <= 0) {
3114 error = "Response Packet desclen error";
3115 goto prot_error;
3116 }
3117
3118 strncpy(nd->nd_ps_desc, b + 12, desclen);
3119 nd->nd_ps_desc[desclen] = 0;
3120 }
3121
3122 nd->nd_expect &= ~NR_IDENT;
3123 break;
3124
3125 /*
3126 * Capability Response Packet.
3127 */
3128
3129 case 2:
3130 {
3131 int nn = get_unaligned_be16(b + 4);
3132
3133 if (nn > CHAN_MAX)
3134 nn = CHAN_MAX;
3135
3136 dgrp_chan_count(nd, nn);
3137 }
3138
3139 nd->nd_expect &= ~NR_CAPABILITY;
3140 break;
3141
3142 /*
3143 * VPD Response Packet.
3144 */
3145
3146 case 15:
3147 /*
3148 * NOTE: case 15 is here ONLY because the EtherLite
3149 * is broken, and sends a response to 24 back as 15.
3150 * To resolve this, the EtherLite firmware is now
3151 * fixed to send back 24 correctly, but, for backwards
3152 * compatibility, we now have reserved 15 for the
3153 * bad EtherLite response to 24 as well.
3154 */
3155
3156 /* Fallthru! */
3157
3158 case 24:
3159
3160 /*
3161 * If the product doesn't support VPD,
3162 * it will send back a null IDRESP,
3163 * which is a length of 4 bytes.
3164 */
3165 if (plen > 4) {
3166 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3167 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3168 }
3169
3170 nd->nd_expect &= ~NR_VPD;
3171 break;
3172
3173 default:
3174 goto decode_error;
3175 }
3176
3177 if (nd->nd_expect == 0 &&
3178 nd->nd_state == NS_WAIT_QUERY) {
3179 nd->nd_state = NS_READY;
3180 }
3181 break;
3182
3183 /*
3184 * Debug packet.
3185 */
3186
3187 case 14:
3188 if (remain < 4)
3189 goto done;
3190
3191 plen = get_unaligned_be16(b + 2) + 4;
3192
3193 if (plen > 1000) {
3194 error = "Debug Packet too large";
3195 goto prot_error;
3196 }
3197
3198 if (remain < plen)
3199 goto done;
3200 break;
3201
3202 /*
3203 * Handle reset packet.
3204 */
3205
3206 case 15:
3207 if (remain < 2)
3208 goto done;
3209
3210 plen = 2 + b[1];
3211
3212 if (remain < plen)
3213 goto done;
3214
3215 nd->nd_tx_work = 1;
3216
3217 n = b[plen];
3218 b[plen] = 0;
3219
3220 b[plen] = n;
3221
3222 error = "Client Reset Acknowledge";
3223 goto prot_error;
3224
3225 default:
3226 goto decode_error;
3227 }
3228 break;
3229
3230 default:
3231 goto decode_error;
3232 }
3233
3234 b += plen;
3235 remain -= plen;
3236 }
3237
3238 /*
3239 * When the buffer is exhausted, copy any data left at the
3240 * top of the buffer back down to the bottom for the next
3241 * read request.
3242 */
3243
3244 done:
3245 if (remain > 0 && b != buf)
3246 memcpy(buf, b, remain);
3247
3248 nd->nd_remain = remain;
3249 return;
3250
3251 /*
3252 * Handle a decode error.
3253 */
3254
3255 decode_error:
3256 error = "Protocol decode error";
3257
3258 /*
3259 * Handle a general protocol error.
3260 */
3261
3262 prot_error:
3263 nd->nd_remain = 0;
3264 nd->nd_state = NS_SEND_ERROR;
3265 nd->nd_error = error;
3266 }
3267
3268 /*
3269 * dgrp_net_write() -- write data to the network device.
3270 *
3271 * A zero byte write indicates that the connection to the RealPort
3272 * device has been broken.
3273 *
3274 * A non-zero write indicates data from the RealPort device.
3275 */
static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct nd_struct *nd;
	ssize_t rtn = 0;
	long n;
	long total = 0;	/* bytes consumed from the user buffer so far */

	/*
	 * Get the node pointer, and quit if it doesn't exist.
	 */
	nd = (struct nd_struct *)(file->private_data);
	if (!nd)
		return -ENXIO;

	/*
	 * Grab the NET lock.  Held for the whole write so that the
	 * iobuf/nd_remain state and dgrp_receive() never see a
	 * partially-copied packet from a concurrent writer.
	 */
	down(&nd->nd_net_semaphore);

	/* Statistics only; decayed unlocked in dgrp_poll_handler(). */
	nd->nd_write_count++;

	/*
	 * Handle disconnect.  By convention a zero-byte write from the
	 * daemon means the connection to the RealPort device is gone.
	 */

	if (count == 0) {
		dgrp_net_idle(nd);
		/*
		 * Set the active port count to zero.
		 */
		dgrp_chan_count(nd, 0);
		goto unlock;
	}

	/*
	 * Loop to process entire receive packet.  Each pass copies at
	 * most the free space left in nd_iobuf (UIO_MAX minus the
	 * unparsed residue nd_remain kept by dgrp_receive()).
	 */

	while (count > 0) {
		n = UIO_MAX - nd->nd_remain;

		if (n > count)
			n = count;

		/*
		 * Account received bytes plus per-packet link overhead.
		 * NOTE(review): adds lk_header_size once per copy chunk,
		 * not once per wire packet — approximate by design,
		 * consistent with the other unlocked statistics.
		 */
		nd->nd_rx_byte += n + nd->nd_link.lk_header_size;

		rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
				     (void __user *) buf + total, n);
		if (rtn) {
			/* Partial copy: fail the whole write. */
			rtn = -EFAULT;
			goto unlock;
		}

		*ppos += n;

		total += n;

		count -= n;

		/* Snapshot the new data for the monitor device, if open,
		 * before nd_remain is advanced past it. */
		if (nd->nd_mon_buf)
			dgrp_monitor_data(nd, RPDUMP_SERVER,
					  nd->nd_iobuf + nd->nd_remain, n);

		nd->nd_remain += n;

		/* Parse as much of the buffer as possible; leftovers stay
		 * in nd_iobuf and nd_remain reflects them for next pass. */
		dgrp_receive(nd);
	}

	rtn = total;

unlock:
	/*
	 * Release the NET lock.
	 */
	up(&nd->nd_net_semaphore);

	return rtn;
}
3355
3356
/*
 * dgrp_net_select()
 *	Poll entry point: report whether the network device is ready to be
 *	read (transmit work is pending for the daemon) and register with the
 *	wait queue for wakeups.  The device is always considered writeable.
 */
3362 static unsigned int dgrp_net_select(struct file *file,
3363 struct poll_table_struct *table)
3364 {
3365 unsigned int retval = 0;
3366 struct nd_struct *nd = file->private_data;
3367
3368 poll_wait(file, &nd->nd_tx_waitq, table);
3369
3370 if (nd->nd_tx_ready)
3371 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3372
3373 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3374
3375 return retval;
3376 }
3377
3378 /*
3379 * dgrp_net_ioctl
3380 *
3381 * Implement those functions which allow the network daemon to control
3382 * the network parameters in the driver. The ioctls include ones to
3383 * get and set the link speed parameters for the PortServer.
3384 */
3385 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
3386 unsigned long arg)
3387 {
3388 struct nd_struct *nd;
3389 int rtn = 0;
3390 long size = _IOC_SIZE(cmd);
3391 struct link_struct link;
3392
3393 nd = file->private_data;
3394
3395 if (_IOC_DIR(cmd) & _IOC_READ)
3396 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3397 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3398 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3399
3400 if (!rtn)
3401 return rtn;
3402
3403 switch (cmd) {
3404 case DIGI_SETLINK:
3405 if (size != sizeof(struct link_struct))
3406 return -EINVAL;
3407
3408 if (copy_from_user((void *)(&link), (void __user *) arg, size))
3409 return -EFAULT;
3410
3411 if (link.lk_fast_rate < 9600)
3412 link.lk_fast_rate = 9600;
3413
3414 if (link.lk_slow_rate < 2400)
3415 link.lk_slow_rate = 2400;
3416
3417 if (link.lk_fast_rate > 10000000)
3418 link.lk_fast_rate = 10000000;
3419
3420 if (link.lk_slow_rate > link.lk_fast_rate)
3421 link.lk_slow_rate = link.lk_fast_rate;
3422
3423 if (link.lk_fast_delay > 2000)
3424 link.lk_fast_delay = 2000;
3425
3426 if (link.lk_slow_delay > 10000)
3427 link.lk_slow_delay = 10000;
3428
3429 if (link.lk_fast_delay < 60)
3430 link.lk_fast_delay = 60;
3431
3432 if (link.lk_slow_delay < link.lk_fast_delay)
3433 link.lk_slow_delay = link.lk_fast_delay;
3434
3435 if (link.lk_header_size < 2)
3436 link.lk_header_size = 2;
3437
3438 if (link.lk_header_size > 128)
3439 link.lk_header_size = 128;
3440
3441 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3442 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3443
3444 link.lk_fast_delay /= dgrp_poll_tick;
3445 link.lk_slow_delay /= dgrp_poll_tick;
3446
3447 nd->nd_link = link;
3448
3449 break;
3450
3451 case DIGI_GETLINK:
3452 if (size != sizeof(struct link_struct))
3453 return -EINVAL;
3454
3455 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3456 size))
3457 return -EFAULT;
3458
3459 break;
3460
3461 default:
3462 return -EINVAL;
3463
3464 }
3465
3466 return 0;
3467 }
3468
3469 /**
3470 * dgrp_poll_handler() -- handler for poll timer
3471 *
3472 * As each timer expires, it determines (a) whether the "transmit"
3473 * waiter needs to be woken up, and (b) whether the poller needs to
3474 * be rescheduled.
3475 */
void dgrp_poll_handler(unsigned long arg)
{
	struct dgrp_poll_data *poll_data;
	struct nd_struct *nd;
	struct link_struct *lk;
	ulong time;
	ulong poll_time;
	ulong freq;	/* poll ticks per second */
	ulong lock_flags;

	poll_data = (struct dgrp_poll_data *) arg;
	freq = 1000 / poll_data->poll_tick;
	/* 17 is relatively prime to typical freq values, so the round
	 * counter cycles through all residues and the statistics decay
	 * below averages out across ticks. */
	poll_data->poll_round += 17;

	if (poll_data->poll_round >= freq)
		poll_data->poll_round -= freq;

	/*
	 * Loop to process all open nodes.
	 *
	 * For each node, determine the rate at which it should
	 * be transmitting data.  Then if the node should wake up
	 * and transmit data now, enable the net receive select
	 * to get the transmit going.
	 */

	list_for_each_entry(nd, &nd_struct_list, list) {

		lk = &nd->nd_link;

		/*
		 * Decrement statistics.  These are only for use with
		 * KME, so don't worry that the operations are done
		 * unlocked, and so the results are occasionally wrong.
		 */

		nd->nd_read_count -= (nd->nd_read_count +
				      poll_data->poll_round) / freq;
		nd->nd_write_count -= (nd->nd_write_count +
				       poll_data->poll_round) / freq;
		nd->nd_send_count -= (nd->nd_send_count +
				      poll_data->poll_round) / freq;
		nd->nd_tx_byte -= (nd->nd_tx_byte +
				   poll_data->poll_round) / freq;
		nd->nd_rx_byte -= (nd->nd_rx_byte +
				   poll_data->poll_round) / freq;

		/*
		 * Wake the daemon to transmit data only when there is
		 * enough byte credit to send data.
		 *
		 * The results are approximate because the operations
		 * are performed unlocked, and we are inspecting
		 * data asynchronously updated elsewhere.  The whole
		 * thing is just approximation anyway, so that should
		 * be okay.
		 */

		if (lk->lk_slow_rate >= UIO_MAX) {

			/* Even the slow rate exceeds a full buffer per
			 * tick: no throttling needed, grant max credit. */
			nd->nd_delay = 0;
			nd->nd_rate = UIO_MAX;

			nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
			nd->nd_tx_credit = 3 * UIO_MAX;

		} else {

			long rate;
			long delay;
			long deposit;
			long charge;
			long size;
			long excess;

			long seq_in = nd->nd_seq_in;
			long seq_out = nd->nd_seq_out;

			/*
			 * If there are no outstanding packets, run at the
			 * fastest rate.
			 */

			if (seq_in == seq_out) {
				delay = 0;
				rate = lk->lk_fast_rate;
			}

			/*
			 * Otherwise compute the transmit rate based on the
			 * delay since the oldest packet.
			 */

			else {
				/*
				 * The actual delay is computed as the
				 * time since the oldest unacknowledged
				 * packet was sent, minus the time it
				 * took to send that packet to the server.
				 */

				delay = ((jiffies - nd->nd_seq_time[seq_out])
					- (nd->nd_seq_size[seq_out] /
					lk->lk_fast_rate));

				/*
				 * If the delay is less than the "fast"
				 * delay, transmit full speed.  If greater
				 * than the "slow" delay, transmit at the
				 * "slow" speed.  In between, interpolate
				 * linearly between the fast and slow speeds.
				 */

				rate =
				  (delay <= lk->lk_fast_delay ?
				    lk->lk_fast_rate :
				    delay >= lk->lk_slow_delay ?
				      lk->lk_slow_rate :
				      (lk->lk_slow_rate +
				       (lk->lk_slow_delay - delay) *
				       (lk->lk_fast_rate - lk->lk_slow_rate) /
				       (lk->lk_slow_delay - lk->lk_fast_delay)
				      )
				  );
			}

			nd->nd_delay = delay;
			nd->nd_rate = rate;

			/*
			 * Increase the transmit credit by depositing the
			 * current transmit rate.
			 */

			deposit = nd->nd_tx_deposit;
			charge = nd->nd_tx_charge;

			deposit += rate;

			/*
			 * If the available transmit credit becomes too large,
			 * reduce the deposit to correct the value.
			 *
			 * Too large is the max of:
			 *	6 times the header size
			 *	3 times the current transmit rate.
			 * (computed below as 3 * max(2 * header, rate))
			 */

			size = 2 * nd->nd_link.lk_header_size;

			if (size < rate)
				size = rate;

			size *= 3;

			excess = deposit - charge - size;

			if (excess > 0)
				deposit -= excess;

			nd->nd_tx_deposit = deposit;
			nd->nd_tx_credit = deposit - charge;

			/*
			 * Wake the transmit task only if the transmit credit
			 * is at least 3 times the transmit header size.
			 */

			size = 3 * lk->lk_header_size;

			if (nd->nd_tx_credit < size)
				continue;
		}


		/*
		 * Enable the READ select to wake the daemon if there
		 * is useful work for the drp_read routine to perform.
		 * A wakeup also fires when the link has been idle for
		 * IDLE_MAX jiffies, so the daemon can send keepalives.
		 */

		if (waitqueue_active(&nd->nd_tx_waitq) &&
		    (nd->nd_tx_work != 0 ||
		    (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
			nd->nd_tx_ready = 1;

			wake_up_interruptible(&nd->nd_tx_waitq);

			/* not needed */
			/* nd->nd_flag &= ~ND_SELECT; */
		}
	}


	/*
	 * Schedule ourself back at the nominal wakeup interval.
	 * The decrement/re-increment below drops our own reference,
	 * then re-takes it only if other active nodes still need the
	 * timer; otherwise the timer is not re-armed and dies here.
	 */
	spin_lock_irqsave(&poll_data->poll_lock, lock_flags);

	poll_data->node_active_count--;
	if (poll_data->node_active_count > 0) {
		poll_data->node_active_count++;
		poll_time = poll_data->timer.expires +
			poll_data->poll_tick * HZ / 1000;

		time = poll_time - jiffies;

		/* If we have fallen far behind (or jiffies wrapped past
		 * the target), restart the cadence from "now" instead of
		 * trying to catch up tick by tick. */
		if (time >= 2 * poll_data->poll_tick)
			poll_time = jiffies + dgrp_poll_tick * HZ / 1000;

		poll_data->timer.expires = poll_time;
		add_timer(&poll_data->timer);
	}

	spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);
}