1/*
2 *
3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
15 *
16 */
17
18/*
19 *
20 * Filename:
21 *
22 * dgrp_net_ops.c
23 *
24 * Description:
25 *
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
28 * in "/proc".
29 *
30 * Author:
31 *
32 * James A. Puzzo
33 *
34 */
35
36#include <linux/module.h>
37#include <linux/proc_fs.h>
38#include <linux/types.h>
39#include <linux/string.h>
40#include <linux/tty.h>
41#include <linux/tty_flip.h>
42#include <linux/spinlock.h>
43#include <linux/poll.h>
44#include <linux/sched.h>
45#include <linux/ratelimit.h>
46#include <asm/unaligned.h>
47
48#define MYFLIPLEN TBUF_MAX
49
50#include "dgrp_common.h"
51
52#define TTY_FLIPBUF_SIZE 512
53#define DEVICE_NAME_SIZE 50
54
55/*
56 * Generic helper function declarations
57 */
58static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
59 unsigned char *fbuf, int *len);
60
61/*
62 * File operation declarations
63 */
64static int dgrp_net_open(struct inode *, struct file *);
65static int dgrp_net_release(struct inode *, struct file *);
66static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
67static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
68 loff_t *);
69static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
70 unsigned long arg);
71static unsigned int dgrp_net_select(struct file *file,
72 struct poll_table_struct *table);
73
74static const struct file_operations net_ops = {
75 .owner = THIS_MODULE,
76 .read = dgrp_net_read,
77 .write = dgrp_net_write,
78 .poll = dgrp_net_select,
79 .unlocked_ioctl = dgrp_net_ioctl,
80 .open = dgrp_net_open,
81 .release = dgrp_net_release,
82};
83
84static struct inode_operations net_inode_ops = {
85 .permission = dgrp_inode_permission
86};
87
88void dgrp_register_net_hook(struct proc_dir_entry *de)
89{
90 struct nd_struct *node = de->data;
91
92 de->proc_iops = &net_inode_ops;
93 de->proc_fops = &net_ops;
94 node->nd_net_de = de;
95 sema_init(&node->nd_net_semaphore, 1);
96 node->nd_state = NS_CLOSED;
97 dgrp_create_node_class_sysfs_files(node);
98}
99
100
101/**
102 * dgrp_dump() -- prints memory for debugging purposes.
103 * @mem: Memory location which should be printed to the console
104 * @len: Number of bytes to be dumped
105 */
106static void dgrp_dump(u8 *mem, int len)
107{
108 int i;
109
110 pr_debug("dgrp dump length = %d, data = ", len);
111 for (i = 0; i < len; ++i)
112 pr_debug("%.2x ", mem[i]);
113 pr_debug("\n");
114}
115
116/**
117 * dgrp_read_data_block() -- Read a data block
118 * @ch: struct ch_struct *
119 * @flipbuf: u8 *
120 * @flipbuf_size: size of flipbuf
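 *
 * The channel receive buffer (ch_rbuf) is a ring of RBUF_MAX bytes, so
 * the copy out of it is done in at most two pieces: the tail of the
 * ring first, then the remainder from the start of the ring.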
121 */
122static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
123 int flipbuf_size)
124{
125 int t;
126 int n;
127
128 if (flipbuf_size <= 0)
129 return;
130
131 t = RBUF_MAX - ch->ch_rout;
132 n = flipbuf_size;
133
134 if (n >= t) {
135 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t);
136 flipbuf += t;
137 n -= t;
138 ch->ch_rout = 0;
139 }
140
141 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
142 flipbuf += n;
143 ch->ch_rout += n;
144}
145
146
147/**
148 * dgrp_input() -- send data to the line discipline
149 * @ch: pointer to channel struct
150 *
151 * Copies the rbuf to the flipbuf and sends the input buffer
152 * data to the line discipline.
153 *
154 * There are several modes to consider here:
155 * rawreadok, tty->real_raw, and IF_PARMRK
156 */
157static void dgrp_input(struct ch_struct *ch)
158{
159 struct nd_struct *nd;
160 struct tty_struct *tty;
161 int remain;
162 int data_len;
163 int len;
164 int flip_len;
165 int tty_count;
166 ulong lock_flags;
167 struct tty_ldisc *ld;
168 u8 *myflipbuf;
169 u8 *myflipflagbuf;
170
171 if (!ch)
172 return;
173
174 nd = ch->ch_nd;
175
176 if (!nd)
177 return;
178
179 spin_lock_irqsave(&nd->nd_lock, lock_flags);
180
181 myflipbuf = nd->nd_inputbuf;
182 myflipflagbuf = nd->nd_inputflagbuf;
183
184 if (!ch->ch_open_count) {
185 ch->ch_rout = ch->ch_rin;
186 goto out;
187 }
188
189 if (ch->ch_tun.un_flag & UN_CLOSING) {
190 ch->ch_rout = ch->ch_rin;
191 goto out;
192 }
193
194 tty = (ch->ch_tun).un_tty;
195
196
197 if (!tty || tty->magic != TTY_MAGIC) {
198 ch->ch_rout = ch->ch_rin;
199 goto out;
200 }
201
202 tty_count = tty->count;
203 if (!tty_count) {
204 ch->ch_rout = ch->ch_rin;
205 goto out;
206 }
207
208 if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
209 ch->ch_rout = ch->ch_rin;
210 goto out;
211 }
212
213 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
214
215 /* Decide how much data we can send into the tty layer */
216 if (dgrp_rawreadok && tty->real_raw)
217 flip_len = MYFLIPLEN;
218 else
219 flip_len = TTY_FLIPBUF_SIZE;
220
221 /* data_len should be the number of chars that we read in */
222 data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
223 remain = data_len;
224
225 /* len is the amount of data we are going to transfer here */
226 len = min(data_len, flip_len);
227
228 /* take into consideration length of ldisc */
229 len = min(len, (N_TTY_BUF_SIZE - 1) - tty->read_cnt);
230
231 ld = tty_ldisc_ref(tty);
232
233 /*
234 * If we were unable to get a reference to the ld,
235 * don't flush our buffer, and act like the ld doesn't
236 * have any space to put the data right now.
237 */
238 if (!ld) {
239 len = 0;
240 } else if (!ld->ops->receive_buf) {
241 spin_lock_irqsave(&nd->nd_lock, lock_flags);
242 ch->ch_rout = ch->ch_rin;
243 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
244 len = 0;
245 }
246
247 /* Check DPA flow control */
248 if ((nd->nd_dpa_debug) &&
249 (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
250 (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
251 len = 0;
252
253 if ((len) && !(ch->ch_flag & CH_RXSTOP)) {
254
255 dgrp_read_data_block(ch, myflipbuf, len);
256
257 /*
258 * In high performance mode, we don't have to update
259 * flag_buf or any of the counts or pointers into flip buf.
260 */
261 if (!dgrp_rawreadok || !tty->real_raw) {
262 if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
263 parity_scan(ch, myflipbuf, myflipflagbuf, &len);
264 else
265 memset(myflipflagbuf, TTY_NORMAL, len);
266 }
267
268 if ((nd->nd_dpa_debug) &&
269 (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
270 dgrp_dpa_data(nd, 1, myflipbuf, len);
271
272 /*
273 * If we're doing raw reads, jam it right into the
274 * line disc bypassing the flip buffers.
275 */
276 if (dgrp_rawreadok && tty->real_raw)
277 ld->ops->receive_buf(tty, myflipbuf, NULL, len);
278 else {
279 len = tty_buffer_request_room(tty, len);
280 tty_insert_flip_string_flags(tty, myflipbuf,
281 myflipflagbuf, len);
282
283 /* Tell the tty layer it's okay to "eat" the data now */
284 tty_flip_buffer_push(tty);
285 }
286
287 ch->ch_rxcount += len;
288 }
289
290 if (ld)
291 tty_ldisc_deref(ld);
292
293 /*
294 * Wake up any sleepers (maybe dgrp close) that might be waiting
295 * for a channel flag state change.
296 */
297 wake_up_interruptible(&ch->ch_flag_wait);
298 return;
299
300out:
301 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
302}
303
304
305/*
306 * parity_scan
307 *
308 * Loop to inspect each single character or 0xFF escape.
309 *
310 * if PARMRK & ~DOSMODE:
311 * 0xFF 0xFF Normal 0xFF character, escaped
312 * to eliminate confusion.
313 * 0xFF 0x00 0x00 Break
314 * 0xFF 0x00 CC Error character CC.
315 * CC Normal character CC.
316 *
317 * if PARMRK & DOSMODE:
318 * 0xFF 0x18 0x00 Break
319 * 0xFF 0x08 0x00 Framing Error
320 * 0xFF 0x04 0x00 Parity error
321 * 0xFF 0x0C 0x00 Both Framing and Parity error
322 *
323 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
324 * as per protocol
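 *
 * For example, with PARMRK set and DOSMODE clear, the incoming byte
 * stream 0x41 0xFF 0xFF 0xFF 0x00 0x00 0x42 decodes to the four
 * characters 'A', 0xFF, 0x00, 'B' with the flags NORMAL, NORMAL,
 * BREAK, NORMAL.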
325 */
326static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
327 unsigned char *fbuf, int *len)
328{
329 int l = *len;
330 int count = 0;
331 int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
332 unsigned char *cout; /* character buffer */
333 unsigned char *fout; /* flag buffer */
334 unsigned char *in;
335 unsigned char c;
336
337 in = cbuf;
338 cout = cbuf;
339 fout = fbuf;
340
341 while (l--) {
342 c = *in;
343 in++;
344
345 switch (ch->ch_pscan_state) {
346 default:
347 /* reset to sanity and fall through */
348 ch->ch_pscan_state = 0;
349
350 case 0:
351 /* No FF seen yet */
352 if (c == 0xff) /* delete this character from stream */
353 ch->ch_pscan_state = 1;
354 else {
355 *cout++ = c;
356 *fout++ = TTY_NORMAL;
357 count += 1;
358 }
359 break;
360
361 case 1:
362 /* first FF seen */
363 if (c == 0xff) {
364 /* doubled ff, transform to single ff */
365 *cout++ = c;
366 *fout++ = TTY_NORMAL;
367 count += 1;
368 ch->ch_pscan_state = 0;
369 } else {
370 /* save value for examination in next state */
371 ch->ch_pscan_savechar = c;
372 ch->ch_pscan_state = 2;
373 }
374 break;
375
376 case 2:
377 /* third character of ff sequence */
378 *cout++ = c;
379 if (DOS) {
380 if (ch->ch_pscan_savechar & 0x10)
381 *fout++ = TTY_BREAK;
382 else if (ch->ch_pscan_savechar & 0x08)
383 *fout++ = TTY_FRAME;
384 else
385 /*
386 * either marked as a parity error,
387 * indeterminate, or not in DOSMODE;
388 * call it a parity error
389 */
390 *fout++ = TTY_PARITY;
391 } else {
392 /* case FF XX ?? where XX is not 00 */
393 if (ch->ch_pscan_savechar & 0xff) {
394 /* this should not happen */
395 pr_info("%s: parity_scan: error unexpected byte\n",
396 __func__);
397 *fout++ = TTY_PARITY;
398 }
399 /* case FF 00 XX where XX is not 00 */
400 else if (c == 0xff)
401 *fout++ = TTY_PARITY;
402 /* case FF 00 00 */
403 else
404 *fout++ = TTY_BREAK;
405
406 }
407 count += 1;
408 ch->ch_pscan_state = 0;
409 }
410 }
411 *len = count;
412}
413
414
415/**
416 * dgrp_net_idle() -- Idle the network connection
417 * @nd: pointer to node structure to idle
418 */
419static void dgrp_net_idle(struct nd_struct *nd)
420{
421 struct ch_struct *ch;
422 int i;
423
424 nd->nd_tx_work = 1;
425
426 nd->nd_state = NS_IDLE;
427 nd->nd_flag = 0;
428
429 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
430 if (nd->nd_seq_wait[i] != 0) {
431 nd->nd_seq_wait[i] = 0;
432 wake_up_interruptible(&nd->nd_seq_wque[i]);
433 }
434
435 if (i == nd->nd_seq_in)
436 break;
437 }
438
439 nd->nd_seq_out = nd->nd_seq_in;
440
441 nd->nd_unack = 0;
442 nd->nd_remain = 0;
443
444 nd->nd_tx_module = 0x10;
445 nd->nd_rx_module = 0x00;
446
447 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
448 ch->ch_state = CS_IDLE;
449
450 ch->ch_otype = 0;
451 ch->ch_otype_waiting = 0;
452 }
453}
454
455/*
456 * Increase the number of channels, waking up any
457 * threads that might be waiting for the channels
458 * to appear.
459 */
460static void increase_channel_count(struct nd_struct *nd, int n)
461{
462 struct ch_struct *ch;
463 struct device *classp;
464 char name[DEVICE_NAME_SIZE];
465 int ret;
466 u8 *buf;
467 int i;
468
469 for (i = nd->nd_chan_count; i < n; ++i) {
470 ch = nd->nd_chan + i;
471
472 /* FIXME: return a useful error instead! */
473 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
474 if (!buf)
475 return;
476
477 if (ch->ch_tbuf)
478 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
479 __func__);
480
481 ch->ch_tbuf = buf;
482
483 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
484 if (!buf)
485 return;
486
487 if (ch->ch_rbuf)
488 pr_info("%s - ch_rbuf was not NULL\n",
489 __func__);
490 ch->ch_rbuf = buf;
491
492 classp = tty_port_register_device(&ch->port,
493 nd->nd_serial_ttdriver, i,
494 NULL);
495
496 ch->ch_tun.un_sysfs = classp;
497 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
498
499 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
500 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
501 &classp->kobj, name);
502
503 /* NOTE: We don't support "cu" devices anymore,
504 * so you will notice we don't register them
505 * here anymore. */
506 if (dgrp_register_prdevices) {
507 classp = tty_register_device(nd->nd_xprint_ttdriver,
508 i, NULL);
509 ch->ch_pun.un_sysfs = classp;
510 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
511
512 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
513 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
514 &classp->kobj, name);
515 }
516
517 nd->nd_chan_count = i + 1;
518 wake_up_interruptible(&ch->ch_flag_wait);
519 }
520}
521
522/*
523 * Decrease the number of channels, and wake up any threads that might
524 * be waiting on the channels that vanished.
525 */
526static void decrease_channel_count(struct nd_struct *nd, int n)
527{
528 struct ch_struct *ch;
529 char name[DEVICE_NAME_SIZE];
530 int i;
531
532 for (i = nd->nd_chan_count - 1; i >= n; --i) {
533 ch = nd->nd_chan + i;
534
535 /*
536 * Make any open ports inoperative.
537 */
538 ch->ch_state = CS_IDLE;
539
540 ch->ch_otype = 0;
541 ch->ch_otype_waiting = 0;
542
543 /*
544 * Only "HANGUP" if we care about carrier
545 * transitions and we are already open.
546 */
547 if (ch->ch_open_count != 0) {
548 ch->ch_flag |= CH_HANGUP;
549 dgrp_carrier(ch);
550 }
551
552 /*
553 * Unlike the CH_HANGUP flag above, use another
554 * flag to indicate to the RealPort state machine
555 * that this port has disappeared.
556 */
557 if (ch->ch_open_count != 0)
558 ch->ch_flag |= CH_PORT_GONE;
559
560 wake_up_interruptible(&ch->ch_flag_wait);
561
562 nd->nd_chan_count = i;
563
564 kfree(ch->ch_tbuf);
565 ch->ch_tbuf = NULL;
566
567 kfree(ch->ch_rbuf);
568 ch->ch_rbuf = NULL;
569
572 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
573 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
574 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
575 tty_unregister_device(nd->nd_serial_ttdriver, i);
576
577 /*
578 * NOTE: We don't support "cu" devices anymore, so don't
579 * unregister them here anymore.
580 */
581
582 if (dgrp_register_prdevices) {
583 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
584 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
585 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
586 tty_unregister_device(nd->nd_xprint_ttdriver, i);
587 }
588 }
589}
590
591/**
592 * dgrp_chan_count() -- Adjust the node channel count.
593 * @nd: pointer to a node structure
594 * @n: new value for channel count
595 *
596 * Adjusts the node channel count. If new ports have appeared, it tries
597 * to signal those processes that might have been waiting for ports to
598 * appear. If ports have disappeared it tries to signal those processes
599 * that might be hung waiting for a response for the now non-existent port.
600 */
601static void dgrp_chan_count(struct nd_struct *nd, int n)
602{
603 if (n == nd->nd_chan_count)
604 return;
605
606 if (n > nd->nd_chan_count)
607 increase_channel_count(nd, n);
608
609 if (n < nd->nd_chan_count)
610 decrease_channel_count(nd, n);
611}
612
613/**
614 * dgrp_monitor() -- send data to the device monitor queue
615 * @nd: pointer to a node structure
616 * @buf: data to copy to the monitoring buffer
617 * @len: number of bytes to transfer to the buffer
618 *
619 * Called by the net device routines to send data to the device
620 * monitor queue. If the device monitor buffer is too full to
621 * accept the data, it waits until the buffer is ready.
622 */
623static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
624{
625 int n;
626 int r;
627 int rtn;
628
629 /*
630 * Grab monitor lock.
631 */
632 down(&nd->nd_mon_semaphore);
633
634 /*
635 * Loop while data remains.
636 */
637 while ((len > 0) && (nd->nd_mon_buf)) {
638 /*
639 * Determine the amount of available space left in the
640 * buffer. If there's none, wait until some appears.
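 * (The free space works out to (out - in - 1) & MON_MASK, which
 * assumes the monitor buffer size MON_MAX is a power of two, as the
 * masking idiom implies.)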
641 */
642
643 n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;
644
645 if (!n) {
646 nd->nd_mon_flag |= MON_WAIT_SPACE;
647
648 up(&nd->nd_mon_semaphore);
649
650 /*
651 * Go to sleep waiting until the condition becomes true.
652 */
653 rtn = wait_event_interruptible(nd->nd_mon_wqueue,
654 ((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));
655
656/* FIXME: really ignore rtn? */
657
658 /*
659 * We can't exit here if we receive a signal, since
660 * to do so would trash the debug stream.
661 */
662
663 down(&nd->nd_mon_semaphore);
664
665 continue;
666 }
667
668 /*
669 * Copy as much data as will fit.
670 */
671
672 if (n > len)
673 n = len;
674
675 r = MON_MAX - nd->nd_mon_in;
676
677 if (r <= n) {
678 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);
679
680 n -= r;
681
682 nd->nd_mon_in = 0;
683
684 buf += r;
685 len -= r;
686 }
687
688 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);
689
690 nd->nd_mon_in += n;
691
692 buf += n;
693 len -= n;
694
695 if (nd->nd_mon_in >= MON_MAX)
696 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
697 __func__, nd->nd_mon_in);
698
699 /*
700 * Wakeup any thread waiting for data
701 */
702
703 if (nd->nd_mon_flag & MON_WAIT_DATA) {
704 nd->nd_mon_flag &= ~MON_WAIT_DATA;
705 wake_up_interruptible(&nd->nd_mon_wqueue);
706 }
707 }
708
709 /*
710 * Release the monitor lock.
711 */
712 up(&nd->nd_mon_semaphore);
713}
714
715/**
716 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
717 * @nd: pointer to a node structure
718 * @buf: destination buffer
719 *
720 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
721 * open.
722 */
723static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
724{
725 ulong t;
726
727 /*
728 * Convert time in HZ since open to time in milliseconds
729 * since open.
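 * The conversion is done in two parts so the intermediate value
 * 1000 * t is never formed, avoiding overflow for large jiffy counts.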
730 */
731 t = jiffies - nd->nd_mon_lbolt;
732 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
733
734 put_unaligned_be32((uint)(t & 0xffffffff), buf);
735}
736
737
738
739/**
740 * dgrp_monitor_message() -- Builds an rpdump-style message record.
741 * @nd: pointer to a node structure
742 * @message: text of the message to be logged
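 *
 * The record written to the monitor buffer is a 1-byte RPDUMP_MESSAGE
 * type, a 4-byte big-endian timestamp (milliseconds since open), a
 * 2-byte big-endian length, and then the message text itself.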
743 */
744static void dgrp_monitor_message(struct nd_struct *nd, char *message)
745{
746 u8 header[7];
747 int n;
748
749 header[0] = RPDUMP_MESSAGE;
750
751 dgrp_encode_time(nd, header + 1);
752
753 n = strlen(message);
754
755 put_unaligned_be16(n, header + 5);
756
757 dgrp_monitor(nd, header, sizeof(header));
758 dgrp_monitor(nd, (u8 *) message, n);
759}
760
761
762
763/**
764 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
765 * @nd: pointer to a node structure
766 */
767static void dgrp_monitor_reset(struct nd_struct *nd)
768{
769 u8 header[5];
770
771 header[0] = RPDUMP_RESET;
772
773 dgrp_encode_time(nd, header + 1);
774
775 dgrp_monitor(nd, header, sizeof(header));
776}
777
778/**
779 * dgrp_monitor_data() -- builds a monitor data packet
780 * @nd: pointer to a node structure
781 * @type: type of message to be logged
782 * @buf: data to be logged
783 * @size: number of bytes in the buffer
784 */
785static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
786{
787 u8 header[7];
788
789 header[0] = type;
790
791 dgrp_encode_time(nd, header + 1);
792
793 put_unaligned_be16(size, header + 5);
794
795 dgrp_monitor(nd, header, sizeof(header));
796 dgrp_monitor(nd, buf, size);
797}
798
799static int alloc_nd_buffers(struct nd_struct *nd)
800{
801
802 nd->nd_iobuf = NULL;
803 nd->nd_writebuf = NULL;
804 nd->nd_inputbuf = NULL;
805 nd->nd_inputflagbuf = NULL;
806
807 /*
808 * Allocate the network read/write buffer.
809 */
810 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
811 if (!nd->nd_iobuf)
812 goto out_err;
813
814 /*
815 * Allocate a buffer for doing the copy from user space to
816 * kernel space in the write routines.
817 */
818 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
819 if (!nd->nd_writebuf)
820 goto out_err;
821
822 /*
823 * Allocate a buffer for doing the copy from kernel space to
824 * tty buffer space in the read routines.
825 */
826 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
827 if (!nd->nd_inputbuf)
828 goto out_err;
829
830 /*
831 * Allocate a buffer for doing the copy from kernel space to
832 * tty buffer space in the read routines.
833 */
834 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
835 if (!nd->nd_inputflagbuf)
836 goto out_err;
837
838 return 0;
839
840out_err:
841 kfree(nd->nd_iobuf);
842 kfree(nd->nd_writebuf);
843 kfree(nd->nd_inputbuf);
844 kfree(nd->nd_inputflagbuf);
845 return -ENOMEM;
846}
847
848/*
849 * dgrp_net_open() -- Open the NET device for a particular PortServer
850 */
851static int dgrp_net_open(struct inode *inode, struct file *file)
852{
853 struct nd_struct *nd;
854 struct proc_dir_entry *de;
855 ulong lock_flags;
856 int rtn;
857
858 rtn = try_module_get(THIS_MODULE);
859 if (!rtn)
860 return -EAGAIN;
861
862 if (!capable(CAP_SYS_ADMIN)) {
863 rtn = -EPERM;
864 goto done;
865 }
866
867 /*
868 * Make sure that the "private_data" field hasn't already been used.
869 */
870 if (file->private_data) {
871 rtn = -EINVAL;
872 goto done;
873 }
874
875 /*
876 * Get the node pointer, and fail if it doesn't exist.
877 */
878 de = PDE(inode);
879 if (!de) {
880 rtn = -ENXIO;
881 goto done;
882 }
883
884 nd = (struct nd_struct *) de->data;
885 if (!nd) {
886 rtn = -ENXIO;
887 goto done;
888 }
889
890 file->private_data = (void *) nd;
891
892 /*
893 * Grab the NET lock.
894 */
895 down(&nd->nd_net_semaphore);
896
897 if (nd->nd_state != NS_CLOSED) {
898 rtn = -EBUSY;
899 goto unlock;
900 }
901
902 /*
903 * Initialize the link speed parameters.
904 */
905
906 nd->nd_link.lk_fast_rate = UIO_MAX;
907 nd->nd_link.lk_slow_rate = UIO_MAX;
908
909 nd->nd_link.lk_fast_delay = 1000;
910 nd->nd_link.lk_slow_delay = 1000;
911
912 nd->nd_link.lk_header_size = 46;
913
914
915 rtn = alloc_nd_buffers(nd);
916 if (rtn)
917 goto unlock;
918
919 /*
920 * The port is now open, so move it to the IDLE state
921 */
922 dgrp_net_idle(nd);
923
924 nd->nd_tx_time = jiffies;
925
926 /*
927 * If the polling routine is not running, start it running here
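 * (dgrp_poll_tick is expressed in milliseconds, hence the HZ / 1000
 * scaling of the timer expiry.)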
928 */
929 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
930
931 if (!dgrp_poll_data.node_active_count) {
932 dgrp_poll_data.node_active_count = 2;
933 dgrp_poll_data.timer.expires = jiffies +
934 dgrp_poll_tick * HZ / 1000;
935 add_timer(&dgrp_poll_data.timer);
936 }
937
938 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
939
940 dgrp_monitor_message(nd, "Net Open");
941
942unlock:
943 /*
944 * Release the NET lock.
945 */
946 up(&nd->nd_net_semaphore);
947
948done:
949 if (rtn)
950 module_put(THIS_MODULE);
951
952 return rtn;
953}
954
955/* dgrp_net_release() -- close the NET device for a particular PortServer */
956static int dgrp_net_release(struct inode *inode, struct file *file)
957{
958 struct nd_struct *nd;
959 ulong lock_flags;
960
961 nd = (struct nd_struct *)(file->private_data);
962 if (!nd)
963 goto done;
964
965/* TODO : historical locking placeholder */
966/*
967 * In the HPUX version of the RealPort driver (which served as a basis
968 * for this driver) this locking code was used. Saved if ever we need
969 * to review the locking under Linux.
970 */
971/* spinlock(&nd->nd_lock); */
972
973
974 /*
975 * Grab the NET lock.
976 */
977 down(&nd->nd_net_semaphore);
978
979 /*
980 * Before "closing" the internal connection, make sure all
981 * ports are "idle".
982 */
983 dgrp_net_idle(nd);
984
985 nd->nd_state = NS_CLOSED;
986 nd->nd_flag = 0;
987
988 /*
989 * TODO ... must the wait queue be reset on close?
990 * should any pending waiters be reset?
991 * Let's decide to assert that the waitq is empty... and see
992 * how soon we break.
993 */
994 if (waitqueue_active(&nd->nd_tx_waitq))
995 pr_info("%s - expected waitqueue_active to be false\n",
996 __func__);
997
998 nd->nd_send = 0;
999
1000 kfree(nd->nd_iobuf);
1001 nd->nd_iobuf = NULL;
1002
1003/* TODO : historical locking placeholder */
1004/*
1005 * In the HPUX version of the RealPort driver (which served as a basis
1006 * for this driver) this locking code was used. Saved if ever we need
1007 * to review the locking under Linux.
1008 */
1009/* spinunlock( &nd->nd_lock ); */
1010
1011
1012 kfree(nd->nd_writebuf);
1013 nd->nd_writebuf = NULL;
1014
1015 kfree(nd->nd_inputbuf);
1016 nd->nd_inputbuf = NULL;
1017
1018 kfree(nd->nd_inputflagbuf);
1019 nd->nd_inputflagbuf = NULL;
1020
1021/* TODO : historical locking placeholder */
1022/*
1023 * In the HPUX version of the RealPort driver (which served as a basis
1024 * for this driver) this locking code was used. Saved if ever we need
1025 * to review the locking under Linux.
1026 */
1027/* spinlock(&nd->nd_lock); */
1028
1029 /*
1030 * Set the active port count to zero.
1031 */
1032 dgrp_chan_count(nd, 0);
1033
1034/* TODO : historical locking placeholder */
1035/*
1036 * In the HPUX version of the RealPort driver (which served as a basis
1037 * for this driver) this locking code was used. Saved if ever we need
1038 * to review the locking under Linux.
1039 */
1040/* spinunlock(&nd->nd_lock); */
1041
1042 /*
1043 * Release the NET lock.
1044 */
1045 up(&nd->nd_net_semaphore);
1046
1047 /*
1048 * Cause the poller to stop scheduling itself if this is
1049 * the last active node.
1050 */
1051 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
1052
1053 if (dgrp_poll_data.node_active_count == 2) {
1054 del_timer(&dgrp_poll_data.timer);
1055 dgrp_poll_data.node_active_count = 0;
1056 }
1057
1058 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
1059
1060done:
1061 if (nd) {
1062 down(&nd->nd_net_semaphore);
1063 dgrp_monitor_message(nd, "Net Close");
1064 up(&nd->nd_net_semaphore);
1065 }
1066
1067 module_put(THIS_MODULE);
1068 file->private_data = NULL;
1069 return 0;
1070}
1071
1072/* used in dgrp_send to setup command header */
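/*
 * The low nibble of the first header byte addresses a port within the
 * currently selected 16-port module; dgrp_send switches modules by
 * emitting a separate 0xf0 | module byte when needed.
 */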
1073static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1074{
1075 *b++ = 0xb0 + (port & 0x0f);
1076 *b++ = cmd;
1077 return b;
1078}
1079
1080/**
1081 * dgrp_send() -- build a packet for transmission to the server
1082 * @nd: pointer to a node structure
1083 * @tmax: maximum bytes to transmit
1084 *
1085 * returns number of bytes sent
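 *
 * Builds one buffer of outgoing RealPort protocol data in nd->nd_iobuf:
 * queued per-port commands and parameter changes first, then transmit
 * data divided across the ready channels, then an optional
 * synchronization sequence.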
1086 */
1087static int dgrp_send(struct nd_struct *nd, long tmax)
1088{
1089 struct ch_struct *ch = nd->nd_chan;
1090 u8 *b;
1091 u8 *buf;
1092 u8 *mbuf;
1093 u8 port;
1094 int mod;
1095 long send;
1096 int maxport;
1097 long lastport = -1;
1098 ushort rwin;
1099 long in;
1100 ushort n;
1101 long t;
1102 long ttotal;
1103 long tchan;
1104 long tsend;
1105 ushort tsafe;
1106 long work;
1107 long send_sync;
1108 long wanted_sync_port = -1;
1109 ushort tdata[CHAN_MAX];
1110 long used_buffer;
1111
1112 mbuf = nd->nd_iobuf + UIO_BASE;
1113 buf = b = mbuf;
1114
1115 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1116
1117 ttotal = 0;
1118 tchan = 0;
1119
1120 memset(tdata, 0, sizeof(tdata));
1121
1122
1123 /*
1124 * If there are any outstanding requests to be serviced,
1125 * service them here.
1126 */
1127 if (nd->nd_send & NR_PASSWORD) {
1128
1129 /*
1130 * Send Password response.
1131 */
1132
1133 b[0] = 0xfc;
1134 b[1] = 0x20;
1135 put_unaligned_be16(strlen(nd->password), b + 2);
1136 b += 4;
1137 b += strlen(nd->password);
1138 nd->nd_send &= ~(NR_PASSWORD);
1139 }
1140
1141
1142 /*
1143 * Loop over all modules to generate commands, and determine
1144 * the amount of data queued for transmit.
1145 */
1146
1147 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1148 /*
1149 * If this is not the current module, enter a module select
1150 * code in the buffer.
1151 */
1152
1153 if (mod != nd->nd_tx_module)
1154 mbuf = ++b;
1155
1156 /*
1157 * Loop to process one module.
1158 */
1159
1160 maxport = port + 16;
1161
1162 if (maxport > nd->nd_chan_count)
1163 maxport = nd->nd_chan_count;
1164
1165 for (; port < maxport; port++, ch++) {
1166 /*
1167 * Switch based on channel state.
1168 */
1169
1170 switch (ch->ch_state) {
1171 /*
1172 * Send requests when the port is closed, and there
1173 * are no Open, Close or Cancel requests expected.
1174 */
1175
1176 case CS_IDLE:
1177 /*
1178 * Wait until any open error code
1179 * has been delivered to all
1180 * associated ports.
1181 */
1182
1183 if (ch->ch_open_error) {
1184 if (ch->ch_wait_count[ch->ch_otype]) {
1185 work = 1;
1186 break;
1187 }
1188
1189 ch->ch_open_error = 0;
1190 }
1191
1192 /*
1193 * Wait until the channel HANGUP flag is reset
1194 * before sending the first open. We can only
1195 * get to this state after a server disconnect.
1196 */
1197
1198 if ((ch->ch_flag & CH_HANGUP) != 0)
1199 break;
1200
1201 /*
1202 * If recovering from a TCP disconnect, or if
1203 * there is an immediate open pending, send an
1204 * Immediate Open request.
1205 */
1206 if ((ch->ch_flag & CH_PORT_GONE) ||
1207 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1208 b = set_cmd_header(b, port, 10);
1209 *b++ = 0;
1210
1211 ch->ch_state = CS_WAIT_OPEN;
1212 ch->ch_otype = OTYPE_IMMEDIATE;
1213 break;
1214 }
1215
1216 /*
1217 * If there is no Persistent or Incoming Open on the wait
1218 * list in the server, and a thread is waiting for a
1219 * Persistent or Incoming Open, send a Persistent or Incoming
1220 * Open Request.
1221 */
1222 if (ch->ch_otype_waiting == 0) {
1223 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1224 b = set_cmd_header(b, port, 10);
1225 *b++ = 1;
1226
1227 ch->ch_state = CS_WAIT_OPEN;
1228 ch->ch_otype = OTYPE_PERSISTENT;
1229 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1230 b = set_cmd_header(b, port, 10);
1231 *b++ = 2;
1232
1233 ch->ch_state = CS_WAIT_OPEN;
1234 ch->ch_otype = OTYPE_INCOMING;
1235 }
1236 break;
1237 }
1238
1239 /*
1240 * If a Persistent or Incoming Open is pending in
1241 * the server, but there is no longer an open
1242 * thread waiting for it, cancel the request.
1243 */
1244
1245 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1246 b = set_cmd_header(b, port, 10);
1247 *b++ = 4;
1248
1249 ch->ch_state = CS_WAIT_CANCEL;
1250 ch->ch_otype = ch->ch_otype_waiting;
1251 }
1252 break;
1253
1254 /*
1255 * Send port parameter queries.
1256 */
1257 case CS_SEND_QUERY:
1258 /*
1259 * Clear out all FEP state that might remain
1260 * from the last connection.
1261 */
1262
1263 ch->ch_flag |= CH_PARAM;
1264
1265 ch->ch_flag &= ~CH_RX_FLUSH;
1266
1267 ch->ch_expect = 0;
1268
1269 ch->ch_s_tin = 0;
1270 ch->ch_s_tpos = 0;
1271 ch->ch_s_tsize = 0;
1272 ch->ch_s_treq = 0;
1273 ch->ch_s_elast = 0;
1274
1275 ch->ch_s_rin = 0;
1276 ch->ch_s_rwin = 0;
1277 ch->ch_s_rsize = 0;
1278
1279 ch->ch_s_tmax = 0;
1280 ch->ch_s_ttime = 0;
1281 ch->ch_s_rmax = 0;
1282 ch->ch_s_rtime = 0;
1283 ch->ch_s_rlow = 0;
1284 ch->ch_s_rhigh = 0;
1285
1286 ch->ch_s_brate = 0;
1287 ch->ch_s_iflag = 0;
1288 ch->ch_s_cflag = 0;
1289 ch->ch_s_oflag = 0;
1290 ch->ch_s_xflag = 0;
1291
1292 ch->ch_s_mout = 0;
1293 ch->ch_s_mflow = 0;
1294 ch->ch_s_mctrl = 0;
1295 ch->ch_s_xon = 0;
1296 ch->ch_s_xoff = 0;
1297 ch->ch_s_lnext = 0;
1298 ch->ch_s_xxon = 0;
1299 ch->ch_s_xxoff = 0;
1300
1301 /* Send Sequence Request */
1302 b = set_cmd_header(b, port, 14);
1303
1304 /* Configure Event Conditions Packet */
1305 b = set_cmd_header(b, port, 42);
1306 put_unaligned_be16(0x02c0, b);
1307 b += 2;
1308 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1309 DM_DSR | DM_RI | DM_CD);
1310
1311 /* Send Status Request */
1312 b = set_cmd_header(b, port, 16);
1313
1314 /* Send Buffer Request */
1315 b = set_cmd_header(b, port, 20);
1316
1317 /* Send Port Capability Request */
1318 b = set_cmd_header(b, port, 22);
1319
1320 ch->ch_expect = (RR_SEQUENCE |
1321 RR_STATUS |
1322 RR_BUFFER |
1323 RR_CAPABILITY);
1324
1325 ch->ch_state = CS_WAIT_QUERY;
1326
1327 /* Raise modem signals */
1328 b = set_cmd_header(b, port, 44);
1329
1330 if (ch->ch_flag & CH_PORT_GONE)
1331 ch->ch_s_mout = ch->ch_mout;
1332 else
1333 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1334
1335 *b++ = ch->ch_mout;
1336 *b++ = ch->ch_s_mflow = 0;
1337 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1338
1339 if (ch->ch_flag & CH_PORT_GONE)
1340 ch->ch_flag &= ~CH_PORT_GONE;
1341
1342 break;
1343
1344 /*
1345 * Handle normal open and ready mode.
1346 */
1347
1348 case CS_READY:
1349
1350 /*
1351 * If the port is not open, and there are no longer
1352 * any ports requesting an open,
1353 * then close the port.
1354 */
1355
1356 if (ch->ch_open_count == 0 &&
1357 ch->ch_wait_count[ch->ch_otype] == 0) {
1358 goto send_close;
1359 }
1360
1361 /*
1362 * Process waiting input.
1363 *
1364 * If there is no one to read it, discard the data.
1365 *
1366 * Otherwise if we are not in fastcook mode, or if there is a
1367 * fastcook thread waiting for data, send the data to the
1368 * line discipline.
1369 */
1370 if (ch->ch_rin != ch->ch_rout) {
1371 if (ch->ch_tun.un_open_count == 0 ||
1372 (ch->ch_tun.un_flag & UN_CLOSING) ||
1373 (ch->ch_cflag & CF_CREAD) == 0) {
1374 ch->ch_rout = ch->ch_rin;
1375 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1376 ch->ch_inwait != 0) {
1377 dgrp_input(ch);
1378
1379 if (ch->ch_rin != ch->ch_rout)
1380 work = 1;
1381 }
1382 }
1383
1384 /*
1385 * Handle receive flush, and changes to
1386 * server port parameters.
1387 */
1388
1389 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1390 /*
1391 * If we are in receive flush mode,
1392 * and enough data has gone by, reset
1393 * receive flush mode.
1394 */
1395 if (ch->ch_flag & CH_RX_FLUSH) {
1396 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1397 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1398 ch->ch_flag &= ~CH_RX_FLUSH;
1399 else
1400 work = 1;
1401 }
1402
1403 /*
1404 * Send TMAX, TTIME.
1405 */
1406
1407 if (ch->ch_s_tmax != ch->ch_tmax ||
1408 ch->ch_s_ttime != ch->ch_ttime) {
1409 b = set_cmd_header(b, port, 48);
1410
1411 ch->ch_s_tmax = ch->ch_tmax;
1412 ch->ch_s_ttime = ch->ch_ttime;
1413
1414 put_unaligned_be16(ch->ch_s_tmax,
1415 b);
1416 b += 2;
1417
1418 put_unaligned_be16(ch->ch_s_ttime,
1419 b);
1420 b += 2;
1421 }
1422
1423 /*
1424 * Send RLOW, RHIGH.
1425 */
1426
1427 if (ch->ch_s_rlow != ch->ch_rlow ||
1428 ch->ch_s_rhigh != ch->ch_rhigh) {
1429 b = set_cmd_header(b, port, 45);
1430
1431 ch->ch_s_rlow = ch->ch_rlow;
1432 ch->ch_s_rhigh = ch->ch_rhigh;
1433
1434 put_unaligned_be16(ch->ch_s_rlow,
1435 b);
1436 b += 2;
1437
1438 put_unaligned_be16(ch->ch_s_rhigh,
1439 b);
1440 b += 2;
1441 }
1442
1443 /*
1444 * Send BRATE, CFLAG, IFLAG,
1445 * OFLAG, XFLAG.
1446 */
1447
1448 if (ch->ch_s_brate != ch->ch_brate ||
1449 ch->ch_s_cflag != ch->ch_cflag ||
1450 ch->ch_s_iflag != ch->ch_iflag ||
1451 ch->ch_s_oflag != ch->ch_oflag ||
1452 ch->ch_s_xflag != ch->ch_xflag) {
1453 b = set_cmd_header(b, port, 40);
1454
1455 ch->ch_s_brate = ch->ch_brate;
1456 ch->ch_s_cflag = ch->ch_cflag;
1457 ch->ch_s_iflag = ch->ch_iflag;
1458 ch->ch_s_oflag = ch->ch_oflag;
1459 ch->ch_s_xflag = ch->ch_xflag;
1460
1461 put_unaligned_be16(ch->ch_s_brate,
1462 b);
1463 b += 2;
1464
1465 put_unaligned_be16(ch->ch_s_cflag,
1466 b);
1467 b += 2;
1468
1469 put_unaligned_be16(ch->ch_s_iflag,
1470 b);
1471 b += 2;
1472
1473 put_unaligned_be16(ch->ch_s_oflag,
1474 b);
1475 b += 2;
1476
1477 put_unaligned_be16(ch->ch_s_xflag,
1478 b);
1479 b += 2;
1480 }
1481
1482 /*
1483 * Send MOUT, MFLOW, MCTRL.
1484 */
1485
1486 if (ch->ch_s_mout != ch->ch_mout ||
1487 ch->ch_s_mflow != ch->ch_mflow ||
1488 ch->ch_s_mctrl != ch->ch_mctrl) {
1489 b = set_cmd_header(b, port, 44);
1490
1491 *b++ = ch->ch_s_mout = ch->ch_mout;
1492 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1493 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1494 }
1495
1496 /*
1497 * Send Flow control characters.
1498 */
1499
1500 if (ch->ch_s_xon != ch->ch_xon ||
1501 ch->ch_s_xoff != ch->ch_xoff ||
1502 ch->ch_s_lnext != ch->ch_lnext ||
1503 ch->ch_s_xxon != ch->ch_xxon ||
1504 ch->ch_s_xxoff != ch->ch_xxoff) {
1505 b = set_cmd_header(b, port, 46);
1506
1507 *b++ = ch->ch_s_xon = ch->ch_xon;
1508 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1509 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1510 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1511 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1512 }
1513
1514 /*
1515 * Send RMAX, RTIME.
1516 */
1517
1518 if (ch->ch_s_rmax != ch->ch_rmax ||
1519 ch->ch_s_rtime != ch->ch_rtime) {
1520 b = set_cmd_header(b, port, 47);
1521
1522 ch->ch_s_rmax = ch->ch_rmax;
1523 ch->ch_s_rtime = ch->ch_rtime;
1524
1525 put_unaligned_be16(ch->ch_s_rmax,
1526 b);
1527 b += 2;
1528
1529 put_unaligned_be16(ch->ch_s_rtime,
1530 b);
1531 b += 2;
1532 }
1533
1534 ch->ch_flag &= ~CH_PARAM;
1535 wake_up_interruptible(&ch->ch_flag_wait);
1536 }
1537
1538
1539 /*
1540 * Handle action commands.
1541 */
1542
1543 if (ch->ch_send != 0) {
1545 send = ch->ch_send & ~ch->ch_expect;
1546
1547 /* Send character immediate */
1548 if ((send & RR_TX_ICHAR) != 0) {
1549 b = set_cmd_header(b, port, 60);
1550
1551 *b++ = ch->ch_xon;
1552 ch->ch_expect |= RR_TX_ICHAR;
1553 }
1554
1555 /* BREAK request */
1556 if ((send & RR_TX_BREAK) != 0) {
1557 if (ch->ch_break_time != 0) {
1558 b = set_cmd_header(b, port, 61);
1559 put_unaligned_be16(ch->ch_break_time,
1560 b);
1561 b += 2;
1562
1563 ch->ch_expect |= RR_TX_BREAK;
1564 ch->ch_break_time = 0;
1565 } else {
1566 ch->ch_send &= ~RR_TX_BREAK;
1567 ch->ch_flag &= ~CH_TX_BREAK;
1568 wake_up_interruptible(&ch->ch_flag_wait);
1569 }
1570 }
1571
1572 /*
1573 * Flush input/output buffers.
1574 */
1575
1576 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1577 b = set_cmd_header(b, port, 62);
1578
1579 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1580 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1581
1582 if (send & RR_RX_FLUSH) {
1583 ch->ch_flush_seq = nd->nd_seq_in;
1584 ch->ch_flag |= CH_RX_FLUSH;
1585 work = 1;
1586 send_sync = 1;
1587 wanted_sync_port = port;
1588 }
1589
1590 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1591 }
1592
1593 /* Pause input/output */
1594 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1595 b = set_cmd_header(b, port, 63);
1596 *b = 0;
1597
1598 if ((send & RR_TX_STOP) != 0)
1599 *b |= EV_OPU;
1600
1601 if ((send & RR_RX_STOP) != 0)
1602 *b |= EV_IPU;
1603
1604 b++;
1605
1606 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1607 }
1608
1609 /* Start input/output */
1610 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1611 b = set_cmd_header(b, port, 64);
1612 *b = 0;
1613
1614 if ((send & RR_TX_START) != 0)
1615 *b |= EV_OPU | EV_OPS | EV_OPX;
1616
1617 if ((send & RR_RX_START) != 0)
1618 *b |= EV_IPU | EV_IPS;
1619
1620 b++;
1621
1622 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1623 }
1624 }
1625
1626
1627 /*
1628 * Send a window sequence to acknowledge received data.
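 * The advertised window is the last accounted receive position plus
 * the free space left in the local receive ring; it is only re-sent
 * once the window has advanced by at least RBUF_MAX/4, so an update
 * is not generated for every packet.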
1629 */
1630
1631 rwin = (ch->ch_s_rin +
1632 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1633
1634 n = (rwin - ch->ch_s_rwin) & 0xffff;
1635
1636 if (n >= RBUF_MAX / 4) {
1637 b[0] = 0xa0 + (port & 0xf);
1638 ch->ch_s_rwin = rwin;
1639 put_unaligned_be16(rwin, b + 1);
1640 b += 3;
1641 }
1642
1643 /*
1644 * If the terminal is waiting on LOW
1645 * water or EMPTY, and the condition
1646 * is now satisfied, call the line
1647 * discipline to put more data in the
1648 * buffer.
1649 */
1650
1651 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1652
1653 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1654 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1655 (n <= TBUF_LOW) :
1656 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1657 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1658
1659 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1660 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1661 tty_wakeup(ch->ch_tun.un_tty);
1662 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1663 }
1664 }
1665
1666 /*
1667 * If the printer is waiting on LOW
1668 * water, TIME, EMPTY or PWAIT, and is
1669 * now ready to put more data in the
1670 * buffer, call the line discipline to
1671 * do the job.
1672 */
1673
1674 if (ch->ch_pun.un_open_count &&
1675 (ch->ch_pun.un_flag &
1676 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1677
1678 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1679 (n <= TBUF_LOW) :
1680 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1681 ((jiffies - ch->ch_waketime) >= 0) :
1682 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1683 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1684 ((ch->ch_tun.un_open_count &&
1685 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1686 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1687 : 1
1688 )
1689 )) {
1690 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1691
1692 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1693 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1694 tty_wakeup(ch->ch_pun.un_tty);
1695 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1696
1697 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1698 work = 1;
1699 }
1700 }
1701
1702
1703 /*
1704 * Determine the max number of bytes
1705 * this port can send, including
1706 * packet header overhead.
1707 */
1708
1709 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1710
1711 if (n > t)
1712 n = t;
1713
1714 if (n != 0) {
1715 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1716
1717 tdata[tchan++] = n;
1718 ttotal += n;
1719 }
1720 break;
1721
1722 /*
1723 * Close the port.
1724 */
1725
1726send_close:
1727 case CS_SEND_CLOSE:
1728 b = set_cmd_header(b, port, 10);
1729 if (ch->ch_otype == OTYPE_IMMEDIATE)
1730 *b++ = 3;
1731 else
1732 *b++ = 4;
1733
1734 ch->ch_state = CS_WAIT_CLOSE;
1735 break;
1736
1737 /*
1738 * Wait for a previous server request.
1739 */
1740
1741 case CS_WAIT_OPEN:
1742 case CS_WAIT_CANCEL:
1743 case CS_WAIT_FAIL:
1744 case CS_WAIT_QUERY:
1745 case CS_WAIT_CLOSE:
1746 break;
1747
1748 default:
1749 pr_info("%s - unexpected channel state (%i)\n",
1750 __func__, ch->ch_state);
1751 }
1752 }
1753
1754 /*
1755 * If a module select code is needed, drop one in. If space
1756 * was reserved for one, but none is needed, recover the space.
1757 */
1758
1759 if (mod != nd->nd_tx_module) {
1760 if (b != mbuf) {
1761 mbuf[-1] = 0xf0 | mod;
1762 nd->nd_tx_module = mod;
1763 } else {
1764 b--;
1765 }
1766 }
1767 }
1768
1769 /*
1770 * Adjust "tmax" so that under worst case conditions we do
1771 * not overflow either the daemon buffer or the internal
1772 * buffer in the loop that follows. Leave a safe area
1773 * of 64 bytes so we start getting asserts before we start
1774 * losing data or clobbering memory.
1775 */
1776
1777 n = UIO_MAX - UIO_BASE;
1778
1779 if (tmax > n)
1780 tmax = n;
1781
1782 tmax -= 64;
1783
1784 tsafe = tmax;
1785
1786 /*
1787 * Allocate space for 5 Module Selects, 1 Sequence Request,
1788 * and 1 Set TREQ for each active channel.
1789 */
1790
1791 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1792
1793 /*
1794 * Further reduce "tmax" to the available transmit credit.
1795 * Note that this is a soft constraint; The transmit credit
1796 * can go negative for a time and then recover.
1797 */
1798
1799 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1800
1801 if (tmax > n)
1802 tmax = n;
1803
1804 /*
1805 * Finally reduce tmax by the number of bytes already in
1806 * the buffer.
1807 */
1808
1809 tmax -= b - buf;
1810
1811 /*
1812 * Suspend data transmit unless every ready channel can send
1813 * at least 1 character.
1814 */
1815 if (tmax < 2 * nd->nd_chan_count) {
1816 tsend = 1;
1817
1818 } else if (tchan > 1 && ttotal > tmax) {
1819
1820 /*
1821 * If transmit is limited by the credit budget, find the
1822 * largest number of characters we can send without driving
1823 * the credit negative.
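 *
 * This is an approximate max-min fair share: channels whose
 * queued data fits under the current per-channel share are
 * granted in full, and the remaining budget is re-divided
 * among the rest (up to three refinement passes).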
1824 */
1825
1826 long tm = tmax;
1827 int tc = tchan;
1828 int try;
1829
1830 tsend = tm / tc;
1831
1832 for (try = 0; try < 3; try++) {
1833 int i;
1834 int c = 0;
1835
1836 for (i = 0; i < tc; i++) {
1837 if (tsend < tdata[i])
1838 tdata[c++] = tdata[i];
1839 else
1840 tm -= tdata[i];
1841 }
1842
1843 if (c == tc)
1844 break;
1845
1846 tsend = tm / c;
1847
1848 if (c == 1)
1849 break;
1850
1851 tc = c;
1852 }
1853
1854 tsend = tm / nd->nd_chan_count;
1855
1856 if (tsend < 2)
1857 tsend = 1;
1858
1859 } else {
1860 /*
1861 * If no budgetary constraints, or only one channel ready
1862 * to send, set the character limit to the remaining
1863 * buffer size.
1864 */
1865
1866 tsend = tmax;
1867 }
1868
1869 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
1870
1871 /*
1872 * Loop over all channels, sending queued data.
1873 */
1874
1875 port = 0;
1876 ch = nd->nd_chan;
1877 used_buffer = tmax;
1878
1879 for (mod = 0; port < nd->nd_chan_count; mod++) {
1880 /*
1881 * If this is not the current module, enter a module select
1882 * code in the buffer.
1883 */
1884
1885 if (mod != nd->nd_tx_module)
1886 mbuf = ++b;
1887
1888 /*
1889 * Loop to process one module.
1890 */
1891
1892 maxport = port + 16;
1893
1894 if (maxport > nd->nd_chan_count)
1895 maxport = nd->nd_chan_count;
1896
1897 for (; port < maxport; port++, ch++) {
1898 if (ch->ch_state != CS_READY)
1899 continue;
1900
1901 lastport = port;
1902
1903 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1904
1905 /*
1906 * If there is data that can be sent, send it.
1907 */
1908
1909 if (n != 0 && used_buffer > 0) {
1910 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1911
1912 if (n > t)
1913 n = t;
1914
1915 if (n > tsend) {
1916 work = 1;
1917 n = tsend;
1918 }
1919
1920 if (n > used_buffer) {
1921 work = 1;
1922 n = used_buffer;
1923 }
1924
1925 if (n <= 0)
1926 continue;
1927
1928 /*
1929 * Create the correct size transmit header,
1930 * depending on the amount of data to transmit.
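 *
 * For example, 5 bytes on port 3 use the one-byte header 0x43,
 * 100 bytes use 0x83 0x64, and 300 bytes use 0x93 0x01 0x2c.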
1931 */
1932
1933 if (n <= 8) {
1934
1935 b[0] = ((n - 1) << 4) + (port & 0xf);
1936 b += 1;
1937
1938 } else if (n <= 255) {
1939
1940 b[0] = 0x80 + (port & 0xf);
1941 b[1] = n;
1942 b += 2;
1943
1944 } else {
1945
1946 b[0] = 0x90 + (port & 0xf);
1947 put_unaligned_be16(n, b + 1);
1948 b += 3;
1949 }
1950
1951 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1952
1953 /*
1954 * Copy transmit data to the packet.
1955 */
1956
1957 t = TBUF_MAX - ch->ch_tout;
1958
1959 if (n >= t) {
1960 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1961 b += t;
1962 n -= t;
1963 used_buffer -= t;
1964 ch->ch_tout = 0;
1965 }
1966
1967 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1968 b += n;
1969 used_buffer -= n;
1970 ch->ch_tout += n;
1971 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1972 }
1973
1974 /*
1975 * Wake any terminal unit process waiting in the
1976 * dgrp_write routine for low water.
1977 */
1978
1979 if (n > TBUF_LOW)
1980 continue;
1981
1982 if ((ch->ch_flag & CH_LOW) != 0) {
1983 ch->ch_flag &= ~CH_LOW;
1984 wake_up_interruptible(&ch->ch_flag_wait);
1985 }
1986
1987 /* selwakeup tty_sel */
1988 if (ch->ch_tun.un_open_count) {
1989 struct tty_struct *tty = (ch->ch_tun.un_tty);
1990
1991 if (waitqueue_active(&tty->write_wait))
1992 wake_up_interruptible(&tty->write_wait);
1993
1994 tty_wakeup(tty);
1995 }
1996
1997 if (ch->ch_pun.un_open_count) {
1998 struct tty_struct *tty = (ch->ch_pun.un_tty);
1999
2000 if (waitqueue_active(&tty->write_wait))
2001 wake_up_interruptible(&tty->write_wait);
2002
2003 tty_wakeup(tty);
2004 }
2005
2006 /*
2007 * Do EMPTY processing.
2008 */
2009
2010 if (n != 0)
2011 continue;
2012
2013 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
2014 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
2015 /*
2016 * If there is still data in the server, ask the server
2017 * to notify us when it's all gone.
2018 */
2019
2020 if (ch->ch_s_treq != ch->ch_s_tin) {
2021 b = set_cmd_header(b, port, 43);
2022
2023 ch->ch_s_treq = ch->ch_s_tin;
2024 put_unaligned_be16(ch->ch_s_treq,
2025 b);
2026 b += 2;
2027 }
2028
2029 /*
2030 * If there is a thread waiting for buffer empty,
2031 * and we are truly empty, wake the thread.
2032 */
2033
2034 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
2035 (ch->ch_send & RR_TX_BREAK) == 0) {
2036 ch->ch_flag &= ~CH_EMPTY;
2037
2038 wake_up_interruptible(&ch->ch_flag_wait);
2039 }
2040 }
2041 }
2042
2043 /*
2044 * If a module select code is needed, drop one in. If space
2045 * was reserved for one, but none is needed, recover the space.
2046 */
2047
2048 if (mod != nd->nd_tx_module) {
2049 if (b != mbuf) {
2050 mbuf[-1] = 0xf0 | mod;
2051 nd->nd_tx_module = mod;
2052 } else {
2053 b--;
2054 }
2055 }
2056 }
2057
2058 /*
2059 * Send a synchronization sequence associated with the last open
2060 * channel that sent data, and remember the time when the data was
2061 * sent.
2062 */
2063
2064 in = nd->nd_seq_in;
2065
2066 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
2067 u8 *bb = b;
2068
2069 /*
2070 * Attempt to use the port that really wanted the sync.
2071 * This gets around a race condition where the "lastport" is in
2072 * the middle of the close() routine, and by the time we
2073 * send this command, it will have already acked the close, and
2074 * thus not send the sync response.
2075 */
2076 if (wanted_sync_port >= 0)
2077 lastport = wanted_sync_port;
2078 /*
2079 * Set a flag just in case the port is in the middle of a close;
2080 * it will not be permitted to actually close until we get a
2081 * sync response, which clears the flag.
2082 */
2083 ch = nd->nd_chan + lastport;
2084 ch->ch_flag |= CH_WAITING_SYNC;
2085
2086 mod = lastport >> 4;
2087
2088 if (mod != nd->nd_tx_module) {
2089 bb[0] = 0xf0 + mod;
2090 bb += 1;
2091
2092 nd->nd_tx_module = mod;
2093 }
2094
2095 bb = set_cmd_header(bb, lastport, 12);
2096 *bb++ = in;
2097
2098 nd->nd_seq_size[in] = bb - buf;
2099 nd->nd_seq_time[in] = jiffies;
2100
2101 if (++in >= SEQ_MAX)
2102 in = 0;
2103
2104 if (in != nd->nd_seq_out) {
2105 b = bb;
2106 nd->nd_seq_in = in;
2107 nd->nd_unack += b - buf;
2108 }
2109 }
2110
2111 /*
2112 * If there are no open ports, a sync cannot be sent.
2113 * There is nothing left to wait for anyway, so wake any
2114 * thread waiting for an acknowledgement.
2115 */
2116
2117 else if (nd->nd_seq_wait[in] != 0) {
2118 nd->nd_seq_wait[in] = 0;
2119
2120 wake_up_interruptible(&nd->nd_seq_wque[in]);
2121 }
2122
2123 /*
2124 * If there is no traffic for an interval of IDLE_MAX, then
2125 * send a single byte packet.
2126 */
2127
2128 if (b != buf) {
2129 nd->nd_tx_time = jiffies;
2130 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2131 *b++ = 0xf0 | nd->nd_tx_module;
2132 nd->nd_tx_time = jiffies;
2133 }
2134
2135 n = b - buf;
2136
2137 if (n >= tsafe)
2138 pr_info("%s - n(%i) >= tsafe(%i)\n",
2139 __func__, n, tsafe);
2140
2141 if (tsend < 0)
2142 dgrp_dump(buf, n);
2143
2144 nd->nd_tx_work = work;
2145
2146 return n;
2147}
2148
2149/*
2150 * dgrp_net_read()
2151 * Data to be sent TO the PortServer from the "async." half of the driver.
2152 */
2153static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2154 loff_t *ppos)
2155{
2156 struct nd_struct *nd;
2157 long n;
2158 u8 *local_buf;
2159 u8 *b;
2160 ssize_t rtn;
2161
2162 /*
2163 * Get the node pointer, and quit if it doesn't exist.
2164 */
2165 nd = (struct nd_struct *)(file->private_data);
2166 if (!nd)
2167 return -ENXIO;
2168
2169 if (count < UIO_MIN)
2170 return -EINVAL;
2171
2172 /*
2173 * Only one read/write operation may be in progress at
2174 * any given time.
2175 */
2176
2177 /*
2178 * Grab the NET lock.
2179 */
2180 down(&nd->nd_net_semaphore);
2181
2182 nd->nd_read_count++;
2183
2184 nd->nd_tx_ready = 0;
2185
2186 /*
2187 * Determine the effective size of the buffer.
2188 */
2189
2190 if (nd->nd_remain > UIO_BASE)
2191 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2192 __func__, nd->nd_remain);
2193
2194 b = local_buf = nd->nd_iobuf + UIO_BASE;
2195
2196 /*
2197 * Generate data according to the node state.
2198 */
2199
2200 switch (nd->nd_state) {
2201 /*
2202 * Initialize the connection.
2203 */
2204
2205 case NS_IDLE:
2206 if (nd->nd_mon_buf)
2207 dgrp_monitor_reset(nd);
2208
2209 /*
2210 * Request a Product ID Packet.
2211 */
2212
2213 b[0] = 0xfb;
2214 b[1] = 0x01;
2215 b += 2;
2216
2217 nd->nd_expect |= NR_IDENT;
2218
2219 /*
2220 * Request a Server Capability ID Response.
2221 */
2222
2223 b[0] = 0xfb;
2224 b[1] = 0x02;
2225 b += 2;
2226
2227 nd->nd_expect |= NR_CAPABILITY;
2228
2229 /*
2230 * Request a Server VPD Response.
2231 */
2232
2233 b[0] = 0xfb;
2234 b[1] = 0x18;
2235 b += 2;
2236
2237 nd->nd_expect |= NR_VPD;
2238
2239 nd->nd_state = NS_WAIT_QUERY;
2240 break;
2241
2242 /*
2243 * We do serious communication with the server only in
2244 * the READY state.
2245 */
2246
2247 case NS_READY:
2248 b = dgrp_send(nd, count) + local_buf;
2249 break;
2250
2251 /*
2252 * Send off an error after receiving a bogus message
2253 * from the server.
2254 */
2255
2256 case NS_SEND_ERROR:
2257 n = strlen(nd->nd_error);
2258
2259 b[0] = 0xff;
2260 b[1] = n;
2261 memcpy(b + 2, nd->nd_error, n);
2262 b += 2 + n;
2263
2264 dgrp_net_idle(nd);
2265 /*
2266 * Set the active port count to zero.
2267 */
2268 dgrp_chan_count(nd, 0);
2269 break;
2270
2271 default:
2272 break;
2273 }
2274
2275 n = b - local_buf;
2276
2277 if (n != 0) {
2278 nd->nd_send_count++;
2279
2280 nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2281 nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2282 }
2283
2284 rtn = copy_to_user((void __user *)buf, local_buf, n);
2285 if (rtn) {
2286 rtn = -EFAULT;
2287 goto done;
2288 }
2289
2290 *ppos += n;
2291
2292 rtn = n;
2293
2294 if (nd->nd_mon_buf)
2295 dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2296
2297 /*
2298 * Release the NET lock.
2299 */
2300done:
2301 up(&nd->nd_net_semaphore);
2302
2303 return rtn;
2304}
2305
2306/**
2307 * dgrp_receive() -- decode data packets received from the remote PortServer.
2308 * @nd: pointer to a node structure
2309 */
2310static void dgrp_receive(struct nd_struct *nd)
2311{
2312 struct ch_struct *ch;
2313 u8 *buf;
2314 u8 *b;
2315 u8 *dbuf;
2316 char *error;
2317 long port;
2318 long dlen;
2319 long plen;
2320 long remain;
2321 long n;
2322 long mlast;
2323 long elast;
2324 long mstat;
2325 long estat;
2326
2327 char ID[3];
2328
2329 nd->nd_tx_time = jiffies;
2330
2331 ID_TO_CHAR(nd->nd_ID, ID);
2332
2333 b = buf = nd->nd_iobuf;
2334 remain = nd->nd_remain;
2335
2336 /*
2337 * Loop to process Realport protocol packets.
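 * The high nibble of the first byte selects the packet type:
 * 0-7 are data packets whose length is the nibble plus one, 8 and 9
 * are data packets with 8-bit and 16-bit length fields, 10 is a
 * window sequence, and 11 introduces a command response.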
2338 */
2339
2340 while (remain > 0) {
2341 int n0 = b[0] >> 4;
2342 int n1 = b[0] & 0x0f;
2343
2344 if (n0 <= 12) {
2345 port = (nd->nd_rx_module << 4) + n1;
2346
2347 if (port >= nd->nd_chan_count) {
2348 error = "Improper Port Number";
2349 goto prot_error;
2350 }
2351
2352 ch = nd->nd_chan + port;
2353 } else {
2354 port = -1;
2355 ch = NULL;
2356 }
2357
2358 /*
2359 * Process by major packet type.
2360 */
2361
2362 switch (n0) {
2363
2364 /*
2365 * Process 1-byte header data packet.
2366 */
2367
2368 case 0:
2369 case 1:
2370 case 2:
2371 case 3:
2372 case 4:
2373 case 5:
2374 case 6:
2375 case 7:
2376 dlen = n0 + 1;
2377 plen = dlen + 1;
2378
2379 dbuf = b + 1;
2380 goto data;
2381
2382 /*
2383 * Process 2-byte header data packet.
2384 */
2385
2386 case 8:
2387 if (remain < 3)
2388 goto done;
2389
2390 dlen = b[1];
2391 plen = dlen + 2;
2392
2393 dbuf = b + 2;
2394 goto data;
2395
2396 /*
2397 * Process 3-byte header data packet.
2398 */
2399
2400 case 9:
2401 if (remain < 4)
2402 goto done;
2403
2404 dlen = get_unaligned_be16(b + 1);
2405 plen = dlen + 3;
2406
2407 dbuf = b + 3;
2408
2409 /*
2410 * Common packet handling code.
2411 */
2412
2413data:
2414 nd->nd_tx_work = 1;
2415
2416 /*
2417 * Data should appear only when we are
2418 * in the CS_READY state.
2419 */
2420
2421 if (ch->ch_state < CS_READY) {
2422 error = "Data received before RWIN established";
2423 goto prot_error;
2424 }
2425
2426 /*
2427 * Assure that the data received is within the
2428 * allowable window.
2429 */
2430
2431 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2432
2433 if (dlen > n) {
2434 error = "Receive data overrun";
2435 goto prot_error;
2436 }
2437
2438 /*
2439 * If we received 3 or fewer characters,
2440 * assume it is a human typing, and set RTIME
2441 * to 10 milliseconds.
2442 *
2443 * If we receive more than 3 characters,
2444 * assume it's not a human typing, and set RTIME
2445 * to 100 milliseconds.
2446 */
2447
2448 if (ch->ch_edelay != DGRP_RTIME) {
2449 if (ch->ch_rtime != ch->ch_edelay) {
2450 ch->ch_rtime = ch->ch_edelay;
2451 ch->ch_flag |= CH_PARAM;
2452 }
2453 } else if (dlen <= 3) {
2454 if (ch->ch_rtime != 10) {
2455 ch->ch_rtime = 10;
2456 ch->ch_flag |= CH_PARAM;
2457 }
2458 } else {
2459 if (ch->ch_rtime != DGRP_RTIME) {
2460 ch->ch_rtime = DGRP_RTIME;
2461 ch->ch_flag |= CH_PARAM;
2462 }
2463 }
2464
2465 /*
2466 * If a portion of the packet is outside the
2467 * buffer, shorten the effective length of the
2468 * data packet to be the amount of data received.
2469 */
2470
2471 if (remain < plen)
2472 dlen -= plen - remain;
2473
2474 /*
2475 * Detect if receive flush is now complete.
2476 */
2477
2478 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2479 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2480 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2481 ch->ch_flag &= ~CH_RX_FLUSH;
2482 }
2483
2484 /*
2485 * If we are ready to receive, move the data into
2486 * the receive buffer.
2487 */
2488
2489 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2490
2491 if (ch->ch_state == CS_READY &&
2492 (ch->ch_tun.un_open_count != 0) &&
2493 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2494 (ch->ch_cflag & CF_CREAD) != 0 &&
2495 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2496 (ch->ch_send & RR_RX_FLUSH) == 0) {
2497
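 /*
  * The receive buffer is circular. If the new data would run past
  * RBUF_MAX, copy what fits at the end, wrap ch_rin back to zero,
  * and copy the rest at the start: e.g. with ch_rin == RBUF_MAX - 5
  * and dlen == 12, five bytes land at the end of the buffer and the
  * remaining seven restart at offset 0.
  */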
2498 if (ch->ch_rin + dlen >= RBUF_MAX) {
2499 n = RBUF_MAX - ch->ch_rin;
2500
2501 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2502
2503 ch->ch_rin = 0;
2504 dbuf += n;
2505 dlen -= n;
2506 }
2507
2508 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2509
2510 ch->ch_rin += dlen;
2511
2512
2513 /*
2514 * If we are not in fastcook mode, or
2515 * if there is a fastcook thread
2516 * waiting for data, send the data to
2517 * the line discipline.
2518 */
2519
2520 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2521 ch->ch_inwait != 0) {
2522 dgrp_input(ch);
2523 }
2524
2525 /*
2526 * If there is a read thread waiting
2527 * in select, and we are in fastcook
2528 * mode, wake him up.
2529 */
2530
2531 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2532 (ch->ch_flag & CH_FAST_READ) != 0)
2533 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2534
2535 /*
2536 * Wake any thread waiting in the
2537 * fastcook loop.
2538 */
2539
2540 if ((ch->ch_flag & CH_INPUT) != 0) {
2541 ch->ch_flag &= ~CH_INPUT;
2542
2543 wake_up_interruptible(&ch->ch_flag_wait);
2544 }
2545 }
2546
2547 /*
2548 * Fabricate and insert a data packet header to
2549 * precede the remaining data when it comes in.
2550 */
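 /*
  * For example, if plen == 100 but only remain == 40 bytes of the
  * packet have arrived, the 60 missing bytes are described by a
  * synthetic case-9 header written back at the start of the buffer
  * (0x90 | n1 followed by a big-endian 16-bit count of 60), and
  * remain is set to 3 so that header is carried over and the
  * outstanding bytes are handled as ordinary data on the next write.
  */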
2551
2552 if (remain < plen) {
2553 dlen = plen - remain;
2554 b = buf;
2555
2556 b[0] = 0x90 + n1;
2557 put_unaligned_be16(dlen, b + 1);
2558
2559 remain = 3;
2560 goto done;
2561 }
2562 break;
2563
2564 /*
2565 * Handle Window Sequence packets.
2566 */
2567
2568 case 10:
2569 plen = 3;
2570 if (remain < plen)
2571 goto done;
2572
2573 nd->nd_tx_work = 1;
2574
2575 {
2576 ushort tpos = get_unaligned_be16(b + 1);
2577
2578 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2579 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2580 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
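
 /*
  * All of the above is modulo 2^16, so the counters may wrap. For
  * example, with ch_s_tpos == 0xfff0 and ch_s_tin == 0x0010 there
  * are 0x20 (32) unacknowledged bytes, and a reported tpos of
  * 0x0005 acknowledges 0x15 (21) of them, which is valid because
  * ack <= unack.
  */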
2581
2582 if (ch->ch_state < CS_READY || ack > unack) {
2583 error = "Improper Window Sequence";
2584 goto prot_error;
2585 }
2586
2587 ch->ch_s_tpos = tpos;
2588
2589 if (notify <= ack)
2590 ch->ch_s_treq = tpos;
2591 }
2592 break;
2593
2594 /*
2595 * Handle Command response packets.
2596 */
2597
2598 case 11:
2599
2600 /*
2601 * RealPort engine fix - 03/11/2004
2602 *
2603 * This check was not originally here.
2604 *
2605 * We were using b[1] without verifying that the data
2606 * is actually there and valid. On a split packet, it
2607 * might not be yet.
2608 *
2609 * NOTE: I have never actually seen the failure happen
2610 * under Linux, but since I have seen it occur
2611 * under both Solaris and HP-UX, the assumption
2612 * is that it *could* happen here as well...
2613 */
2614 if (remain < 2)
2615 goto done;
2616
2617
2618 switch (b[1]) {
2619
2620 /*
2621 * Handle Open Response.
2622 */
2623
2624 case 11:
2625 plen = 6;
2626 if (remain < plen)
2627 goto done;
2628
2629 nd->nd_tx_work = 1;
2630
2631 {
2632 int req = b[2];
2633 int resp = b[3];
2634 port = get_unaligned_be16(b + 4);
2635
2636 if (port >= nd->nd_chan_count) {
2637 error = "Open channel number out of range";
2638 goto prot_error;
2639 }
2640
2641 ch = nd->nd_chan + port;
2642
2643 /*
2644 * How we handle an open response depends primarily
2645 * on our current channel state.
2646 */
2647
2648 switch (ch->ch_state) {
2649 case CS_IDLE:
2650
2651 /*
2652 * Handle a delayed open.
2653 */
2654
2655 if (ch->ch_otype_waiting != 0 &&
2656 req == ch->ch_otype_waiting &&
2657 resp == 0) {
2658 ch->ch_otype = req;
2659 ch->ch_otype_waiting = 0;
2660 ch->ch_state = CS_SEND_QUERY;
2661 break;
2662 }
2663 goto open_error;
2664
2665 case CS_WAIT_OPEN:
2666
2667 /*
2668 * Handle the open response.
2669 */
2670
2671 if (req == ch->ch_otype) {
2672 switch (resp) {
2673
2674 /*
2675 * On successful response, open the
2676 * port and proceed normally.
2677 */
2678
2679 case 0:
2680 ch->ch_state = CS_SEND_QUERY;
2681 break;
2682
2683 /*
2684 * On a busy response to a persistent open,
2685 * remember that the open is pending.
2686 */
2687
2688 case 1:
2689 case 2:
2690 if (req != OTYPE_IMMEDIATE) {
2691 ch->ch_otype_waiting = req;
2692 ch->ch_state = CS_IDLE;
2693 break;
2694 }
2695
2696 /*
2697 * Otherwise the server open failed. If
2698 * the Unix port is open, hang it up.
2699 */
2700
2701 default:
2702 if (ch->ch_open_count != 0) {
2703 ch->ch_flag |= CH_HANGUP;
2704 dgrp_carrier(ch);
2705 ch->ch_state = CS_IDLE;
2706 break;
2707 }
2708
2709 ch->ch_open_error = resp;
2710 ch->ch_state = CS_IDLE;
2711
2712 wake_up_interruptible(&ch->ch_flag_wait);
2713 }
2714 break;
2715 }
2716
2717 /*
2718 * Handle delayed response arrival preceding
2719 * the open response we are waiting for.
2720 */
2721
2722 if (ch->ch_otype_waiting != 0 &&
2723 req == ch->ch_otype_waiting &&
2724 resp == 0) {
2725 ch->ch_otype = ch->ch_otype_waiting;
2726 ch->ch_otype_waiting = 0;
2727 ch->ch_state = CS_WAIT_FAIL;
2728 break;
2729 }
2730 goto open_error;
2731
2732
2733 case CS_WAIT_FAIL:
2734
2735 /*
2736 * Handle response to immediate open arriving
2737 * after a delayed open success.
2738 */
2739
2740 if (req == OTYPE_IMMEDIATE) {
2741 ch->ch_state = CS_SEND_QUERY;
2742 break;
2743 }
2744 goto open_error;
2745
2746
2747 case CS_WAIT_CANCEL:
2748 /*
2749 * Handle delayed open response arriving before
2750 * the cancel response.
2751 */
2752
2753 if (req == ch->ch_otype_waiting &&
2754 resp == 0) {
2755 ch->ch_otype_waiting = 0;
2756 break;
2757 }
2758
2759 /*
2760 * Handle cancel response.
2761 */
2762
2763 if (req == 4 && resp == 0) {
2764 ch->ch_otype_waiting = 0;
2765 ch->ch_state = CS_IDLE;
2766 break;
2767 }
2768 goto open_error;
2769
2770
2771 case CS_WAIT_CLOSE:
2772 /*
2773 * Handle a successful response to a port
2774 * close.
2775 */
2776
2777 if (req >= 3) {
2778 ch->ch_state = CS_IDLE;
2779 break;
2780 }
2781 goto open_error;
2782
2783open_error:
2784 default:
2785 {
2786 error = "Improper Open Response";
2787 goto prot_error;
2788 }
2789 }
2790 }
2791 break;
2792
2793 /*
2794 * Handle Synchronize Response.
2795 */
2796
2797 case 13:
2798 plen = 3;
2799 if (remain < plen)
2800 goto done;
2801 {
2802 int seq = b[2];
2803 int s;
2804
2805 /*
2806 * If channel was waiting for this sync response,
2807 * unset the flag, and wake up anyone waiting
2808 * on the event.
2809 */
2810 if (ch->ch_flag & CH_WAITING_SYNC) {
2811 ch->ch_flag &= ~(CH_WAITING_SYNC);
2812 wake_up_interruptible(&ch->ch_flag_wait);
2813 }
2814
2815 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2816 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2817 break;
2818 }
2819
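 /*
  * A sync response for sequence "seq" implicitly acknowledges every
  * outstanding sequence from nd_seq_out up to and including seq:
  * wake any thread waiting on each one, subtract its size from the
  * unacknowledged byte count, then advance nd_seq_out past seq.
  */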
2820 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2821 if (nd->nd_seq_wait[s] != 0) {
2822 nd->nd_seq_wait[s] = 0;
2823
2824 wake_up_interruptible(&nd->nd_seq_wque[s]);
2825 }
2826
2827 nd->nd_unack -= nd->nd_seq_size[s];
2828
2829 if (s == seq)
2830 break;
2831 }
2832
2833 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2834 }
2835 break;
2836
2837 /*
2838 * Handle Sequence Response.
2839 */
2840
2841 case 15:
2842 plen = 6;
2843 if (remain < plen)
2844 goto done;
2845
2846 {
2847 /* Record that we have received the Sequence
2848 * Response, but we aren't interested in the
2849 * sequence numbers. We were using RIN like it
2850 * was ROUT and that was causing problems,
2851 * fixed 7-13-2001 David Fries. See comment in
2852 * drp.h for ch_s_rin variable.
2853 int rin = get_unaligned_be16(b + 2);
2854 int tpos = get_unaligned_be16(b + 4);
2855 */
2856
2857 ch->ch_send &= ~RR_SEQUENCE;
2858 ch->ch_expect &= ~RR_SEQUENCE;
2859 }
2860 goto check_query;
2861
2862 /*
2863 * Handle Status Response.
2864 */
2865
2866 case 17:
2867 plen = 5;
2868 if (remain < plen)
2869 goto done;
2870
2871 {
2872 ch->ch_s_elast = get_unaligned_be16(b + 2);
2873 ch->ch_s_mlast = b[4];
2874
2875 ch->ch_expect &= ~RR_STATUS;
2876 ch->ch_send &= ~RR_STATUS;
2877
2878 /*
2879 * CH_PHYS_CD is cleared because something _could_ be
2880 * waiting for the initial sense of carrier... and if
2881 * carrier is high immediately, we want to be sure to
2882 * wake them as soon as possible.
2883 */
2884 ch->ch_flag &= ~CH_PHYS_CD;
2885
2886 dgrp_carrier(ch);
2887 }
2888 goto check_query;
2889
2890 /*
2891 * Handle Line Error Response.
2892 */
2893
2894 case 19:
2895 plen = 14;
2896 if (remain < plen)
2897 goto done;
2898
2899 break;
2900
2901 /*
2902 * Handle Buffer Response.
2903 */
2904
2905 case 21:
2906 plen = 6;
2907 if (remain < plen)
2908 goto done;
2909
2910 {
2911 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2912 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2913
2914 ch->ch_send &= ~RR_BUFFER;
2915 ch->ch_expect &= ~RR_BUFFER;
2916 }
2917 goto check_query;
2918
2919 /*
2920 * Handle Port Capability Response.
2921 */
2922
2923 case 23:
2924 plen = 32;
2925 if (remain < plen)
2926 goto done;
2927
2928 {
2929 ch->ch_send &= ~RR_CAPABILITY;
2930 ch->ch_expect &= ~RR_CAPABILITY;
2931 }
2932
2933 /*
2934 * When all queries are complete, set those parameters
2935 * derived from the query results, then transition
2936 * to the READY state.
2937 */
2938
2939check_query:
2940 if (ch->ch_state == CS_WAIT_QUERY &&
2941 (ch->ch_expect & (RR_SEQUENCE |
2942 RR_STATUS |
2943 RR_BUFFER |
2944 RR_CAPABILITY)) == 0) {
2945 ch->ch_tmax = ch->ch_s_tsize / 4;
2946
2947 if (ch->ch_edelay == DGRP_TTIME)
2948 ch->ch_ttime = DGRP_TTIME;
2949 else
2950 ch->ch_ttime = ch->ch_edelay;
2951
2952 ch->ch_rmax = ch->ch_s_rsize / 4;
2953
2954 if (ch->ch_edelay == DGRP_RTIME)
2955 ch->ch_rtime = DGRP_RTIME;
2956 else
2957 ch->ch_rtime = ch->ch_edelay;
2958
2959 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2960 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
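
 /*
  * For example, a reported ch_s_rsize of 1024 gives ch_rmax == 256,
  * ch_rlow == 256 and ch_rhigh == 768, i.e. roughly 1/4 and 3/4 of
  * the remote receive buffer size.
  */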
2961
2962 ch->ch_state = CS_READY;
2963
2964 nd->nd_tx_work = 1;
2965 wake_up_interruptible(&ch->ch_flag_wait);
2966
2967 }
2968 break;
2969
2970 default:
2971 goto decode_error;
2972 }
2973 break;
2974
2975 /*
2976 * Handle Events.
2977 */
2978
2979 case 12:
2980 plen = 4;
2981 if (remain < plen)
2982 goto done;
2983
2984 mlast = ch->ch_s_mlast;
2985 elast = ch->ch_s_elast;
2986
2987 mstat = ch->ch_s_mlast = b[1];
2988 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2989
2990 /*
2991 * Handle modem changes.
2992 */
2993
2994 if (((mstat ^ mlast) & DM_CD) != 0)
2995 dgrp_carrier(ch);
2996
2997
2998 /*
2999 * Handle received break.
3000 */
3001
3002 if ((estat & ~elast & EV_RXB) != 0 &&
3003 (ch->ch_tun.un_open_count != 0) &&
3004 I_BRKINT(ch->ch_tun.un_tty) &&
3005 !(I_IGNBRK(ch->ch_tun.un_tty))) {
3006
3007 tty_buffer_request_room(ch->ch_tun.un_tty, 1);
3008 tty_insert_flip_char(ch->ch_tun.un_tty, 0, TTY_BREAK);
3009 tty_flip_buffer_push(ch->ch_tun.un_tty);
3010
3011 }
3012
3013 /*
3014 * On transmit break complete, if more break traffic
3015 * is waiting then send it. Otherwise wake any threads
3016 * waiting for transmitter empty.
3017 */
3018
3019 if ((~estat & elast & EV_TXB) != 0 &&
3020 (ch->ch_expect & RR_TX_BREAK) != 0) {
3021
3022 nd->nd_tx_work = 1;
3023
3024 ch->ch_expect &= ~RR_TX_BREAK;
3025
3026 if (ch->ch_break_time != 0) {
3027 ch->ch_send |= RR_TX_BREAK;
3028 } else {
3029 ch->ch_send &= ~RR_TX_BREAK;
3030 ch->ch_flag &= ~CH_TX_BREAK;
3031 wake_up_interruptible(&ch->ch_flag_wait);
3032 }
3033 }
3034 break;
3035
3036 case 13:
3037 case 14:
3038 error = "Unrecognized command";
3039 goto prot_error;
3040
3041 /*
3042 * Decode Special Codes.
3043 */
3044
3045 case 15:
3046 switch (n1) {
3047 /*
3048 * One byte module select.
3049 */
3050
3051 case 0:
3052 case 1:
3053 case 2:
3054 case 3:
3055 case 4:
3056 case 5:
3057 case 6:
3058 case 7:
3059 plen = 1;
3060 nd->nd_rx_module = n1;
3061 break;
3062
3063 /*
3064 * Two byte module select.
3065 */
3066
3067 case 8:
3068 plen = 2;
3069 if (remain < plen)
3070 goto done;
3071
3072 nd->nd_rx_module = b[1];
3073 break;
3074
3075 /*
3076 * ID Request packet.
3077 */
3078
3079 case 11:
3080 if (remain < 4)
3081 goto done;
3082
3083 plen = get_unaligned_be16(b + 2);
3084
3085 if (plen < 12 || plen > 1000) {
3086 error = "Response Packet length error";
3087 goto prot_error;
3088 }
3089
3090 nd->nd_tx_work = 1;
3091
3092 switch (b[1]) {
3093 /*
3094 * Echo packet.
3095 */
3096
3097 case 0:
3098 nd->nd_send |= NR_ECHO;
3099 break;
3100
3101 /*
3102 * ID Response packet.
3103 */
3104
3105 case 1:
3106 nd->nd_send |= NR_IDENT;
3107 break;
3108
3109 /*
3110 * Password request packet.
3111 */
3112
3113 case 32:
3114 nd->nd_send |= NR_PASSWORD;
3115 break;
3116
3117 }
3118 break;
3119
3120 /*
3121 * Various node-level response packets.
3122 */
3123
3124 case 12:
3125 if (remain < 4)
3126 goto done;
3127
3128 plen = get_unaligned_be16(b + 2);
3129
3130 if (plen < 4 || plen > 1000) {
3131 error = "Response Packet length error";
3132 goto prot_error;
3133 }
3134
3135 nd->nd_tx_work = 1;
3136
3137 switch (b[1]) {
3138 /*
3139 * Echo packet.
3140 */
3141
3142 case 0:
3143 nd->nd_expect &= ~NR_ECHO;
3144 break;
3145
3146 /*
3147 * Product Response Packet.
3148 */
3149
3150 case 1:
3151 {
3152 int desclen;
3153
3154 nd->nd_hw_ver = (b[8] << 8) | b[9];
3155 nd->nd_sw_ver = (b[10] << 8) | b[11];
3156 nd->nd_hw_id = b[6];
3157 desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
3158 plen - 12;
3159
3160 if (desclen <= 0) {
3161 error = "Response Packet desclen error";
3162 goto prot_error;
3163 }
3164
3165 strncpy(nd->nd_ps_desc, b + 12, desclen);
3166 nd->nd_ps_desc[desclen] = 0;
3167 }
3168
3169 nd->nd_expect &= ~NR_IDENT;
3170 break;
3171
3172 /*
3173 * Capability Response Packet.
3174 */
3175
3176 case 2:
3177 {
3178 int nn = get_unaligned_be16(b + 4);
3179
3180 if (nn > CHAN_MAX)
3181 nn = CHAN_MAX;
3182
3183 dgrp_chan_count(nd, nn);
3184 }
3185
3186 nd->nd_expect &= ~NR_CAPABILITY;
3187 break;
3188
3189 /*
3190 * VPD Response Packet.
3191 */
3192
3193 case 15:
3194 /*
3195 * NOTE: case 15 is here ONLY because the EtherLite
3196 * is broken, and sends a response to 24 back as 15.
3197 * To resolve this, the EtherLite firmware is now
3198 * fixed to send back 24 correctly, but, for backwards
3199 * compatibility, we now have reserved 15 for the
3200 * bad EtherLite response to 24 as well.
3201 */
3202
3203 /* Fallthru! */
3204
3205 case 24:
3206
3207 /*
3208 * If the product doesn't support VPD,
3209 * it will send back a null IDRESP,
3210 * which is a length of 4 bytes.
3211 */
3212 if (plen > 4) {
3213 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3214 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3215 }
3216
3217 nd->nd_expect &= ~NR_VPD;
3218 break;
3219
3220 default:
3221 goto decode_error;
3222 }
3223
3224 if (nd->nd_expect == 0 &&
3225 nd->nd_state == NS_WAIT_QUERY) {
3226 nd->nd_state = NS_READY;
3227 }
3228 break;
3229
3230 /*
3231 * Debug packet.
3232 */
3233
3234 case 14:
3235 if (remain < 4)
3236 goto done;
3237
3238 plen = get_unaligned_be16(b + 2) + 4;
3239
3240 if (plen > 1000) {
3241 error = "Debug Packet too large";
3242 goto prot_error;
3243 }
3244
3245 if (remain < plen)
3246 goto done;
3247 break;
3248
3249 /*
3250 * Handle reset packet.
3251 */
3252
3253 case 15:
3254 if (remain < 2)
3255 goto done;
3256
3257 plen = 2 + b[1];
3258
3259 if (remain < plen)
3260 goto done;
3261
3262 nd->nd_tx_work = 1;
3263
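 /*
  * Temporarily NUL-terminate the reset data (saving and then
  * restoring the byte that follows it) before reporting the reset
  * as a protocol error below.
  */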
3264 n = b[plen];
3265 b[plen] = 0;
3266
3267 b[plen] = n;
3268
3269 error = "Client Reset Acknowledge";
3270 goto prot_error;
3271
3272 default:
3273 goto decode_error;
3274 }
3275 break;
3276
3277 default:
3278 goto decode_error;
3279 }
3280
3281 b += plen;
3282 remain -= plen;
3283 }
3284
3285 /*
3286 * When the buffer is exhausted, copy any data left at the
3287 * top of the buffer back down to the bottom for the next
3288 * read request.
3289 */
3290
3291done:
3292 if (remain > 0 && b != buf)
3293 memcpy(buf, b, remain);
3294
3295 nd->nd_remain = remain;
3296 return;
3297
3298/*
3299 * Handle a decode error.
3300 */
3301
3302decode_error:
3303 error = "Protocol decode error";
3304
3305/*
3306 * Handle a general protocol error.
3307 */
3308
3309prot_error:
3310 nd->nd_remain = 0;
3311 nd->nd_state = NS_SEND_ERROR;
3312 nd->nd_error = error;
3313}
3314
3315/*
3316 * dgrp_net_write() -- write data to the network device.
3317 *
3318 * A zero byte write indicates that the connection to the RealPort
3319 * device has been broken.
3320 *
3321 * A non-zero write indicates data from the RealPort device.
3322 */
3323static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
3324 size_t count, loff_t *ppos)
3325{
3326 struct nd_struct *nd;
3327 ssize_t rtn = 0;
3328 long n;
3329 long total = 0;
3330
3331 /*
3332 * Get the node pointer, and quit if it doesn't exist.
3333 */
3334 nd = (struct nd_struct *)(file->private_data);
3335 if (!nd)
3336 return -ENXIO;
3337
3338 /*
3339 * Grab the NET lock.
3340 */
3341 down(&nd->nd_net_semaphore);
3342
3343 nd->nd_write_count++;
3344
3345 /*
3346 * Handle disconnect.
3347 */
3348
3349 if (count == 0) {
3350 dgrp_net_idle(nd);
3351 /*
3352 * Set the active port count to zero.
3353 */
3354 dgrp_chan_count(nd, 0);
3355 goto unlock;
3356 }
3357
3358 /*
3359 * Loop to process entire receive packet.
3360 */
3361
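 /*
  * Data from the daemon is copied into nd_iobuf in chunks of at
  * most UIO_MAX - nd_remain bytes; dgrp_receive() then consumes
  * whole packets and leaves any trailing partial packet in
  * nd_remain for the next pass (or the next write).
  */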
3362 while (count > 0) {
3363 n = UIO_MAX - nd->nd_remain;
3364
3365 if (n > count)
3366 n = count;
3367
3368 nd->nd_rx_byte += n + nd->nd_link.lk_header_size;
3369
3370 rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
3371 (void __user *) buf + total, n);
3372 if (rtn) {
3373 rtn = -EFAULT;
3374 goto unlock;
3375 }
3376
3377 *ppos += n;
3378
3379 total += n;
3380
3381 count -= n;
3382
3383 if (nd->nd_mon_buf)
3384 dgrp_monitor_data(nd, RPDUMP_SERVER,
3385 nd->nd_iobuf + nd->nd_remain, n);
3386
3387 nd->nd_remain += n;
3388
3389 dgrp_receive(nd);
3390 }
3391
3392 rtn = total;
3393
3394unlock:
3395 /*
3396 * Release the NET lock.
3397 */
3398 up(&nd->nd_net_semaphore);
3399
3400 return rtn;
3401}
3402
3403
3404/*
3405 * dgrp_net_select()
3406 * Determine whether a device is ready to be read or written to, and
3407 * sleep if not.
3408 */
3409static unsigned int dgrp_net_select(struct file *file,
3410 struct poll_table_struct *table)
3411{
3412 unsigned int retval = 0;
3413 struct nd_struct *nd = file->private_data;
3414
3415 poll_wait(file, &nd->nd_tx_waitq, table);
3416
3417 if (nd->nd_tx_ready)
3418 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3419
3420 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3421
3422 return retval;
3423}
3424
3425/*
3426 * dgrp_net_ioctl
3427 *
3428 * Implement those functions which allow the network daemon to control
3429 * the network parameters in the driver. The ioctls include ones to
3430 * get and set the link speed parameters for the PortServer.
3431 */
3432static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
3433 unsigned long arg)
3434{
3435 struct nd_struct *nd;
3436 int rtn = 0;
3437 long size = _IOC_SIZE(cmd);
3438 struct link_struct link;
3439
3440 nd = file->private_data;
3441
3442 if (_IOC_DIR(cmd) & _IOC_READ)
3443 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3444 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3445 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3446
3447 if (!rtn)
3448 return rtn;
3449
3450 switch (cmd) {
3451 case DIGI_SETLINK:
3452 if (size != sizeof(struct link_struct))
3453 return -EINVAL;
3454
3455 if (copy_from_user((void *)(&link), (void __user *) arg, size))
3456 return -EFAULT;
3457
3458 if (link.lk_fast_rate < 9600)
3459 link.lk_fast_rate = 9600;
3460
3461 if (link.lk_slow_rate < 2400)
3462 link.lk_slow_rate = 2400;
3463
3464 if (link.lk_fast_rate > 10000000)
3465 link.lk_fast_rate = 10000000;
3466
3467 if (link.lk_slow_rate > link.lk_fast_rate)
3468 link.lk_slow_rate = link.lk_fast_rate;
3469
3470 if (link.lk_fast_delay > 2000)
3471 link.lk_fast_delay = 2000;
3472
3473 if (link.lk_slow_delay > 10000)
3474 link.lk_slow_delay = 10000;
3475
3476 if (link.lk_fast_delay < 60)
3477 link.lk_fast_delay = 60;
3478
3479 if (link.lk_slow_delay < link.lk_fast_delay)
3480 link.lk_slow_delay = link.lk_fast_delay;
3481
3482 if (link.lk_header_size < 2)
3483 link.lk_header_size = 2;
3484
3485 if (link.lk_header_size > 128)
3486 link.lk_header_size = 128;
3487
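 /*
  * Convert the user-supplied values into the units used by the
  * poll loop: rates become bytes per poll tick (bits/s divided by
  * 8 * 1000 / tick_ms) and delays become poll ticks. For example,
  * with a hypothetical 20 ms poll tick a 1,000,000 bit/s fast rate
  * becomes 2500 bytes per tick.
  */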
3488 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3489 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3490
3491 link.lk_fast_delay /= dgrp_poll_tick;
3492 link.lk_slow_delay /= dgrp_poll_tick;
3493
3494 nd->nd_link = link;
3495
3496 break;
3497
3498 case DIGI_GETLINK:
3499 if (size != sizeof(struct link_struct))
3500 return -EINVAL;
3501
3502 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3503 size))
3504 return -EFAULT;
3505
3506 break;
3507
3508 default:
3509 return -EINVAL;
3510
3511 }
3512
3513 return 0;
3514}
3515
3516/**
3517 * dgrp_poll_handler() -- handler for poll timer
3518 *
3519 * As each timer expires, it determines (a) whether the "transmit"
3520 * waiter needs to be woken up, and (b) whether the poller needs to
3521 * be rescheduled.
3522 */
3523void dgrp_poll_handler(unsigned long arg)
3524{
3525 struct dgrp_poll_data *poll_data;
3526 struct nd_struct *nd;
3527 struct link_struct *lk;
3528 ulong time;
3529 ulong poll_time;
3530 ulong freq;
3531 ulong lock_flags;
3532
3533 poll_data = (struct dgrp_poll_data *) arg;
3534 freq = 1000 / poll_data->poll_tick;
3535 poll_data->poll_round += 17;
3536
3537 if (poll_data->poll_round >= freq)
3538 poll_data->poll_round -= freq;
3539
3540 /*
3541 * Loop to process all open nodes.
3542 *
3543 * For each node, determine the rate at which it should
3544 * be transmitting data. Then if the node should wake up
3545 * and transmit data now, enable the net receive select
3546 * to get the transmit going.
3547 */
3548
3549 list_for_each_entry(nd, &nd_struct_list, list) {
3550
3551 lk = &nd->nd_link;
3552
3553 /*
3554 * Decrement statistics. These are only for use with
3555 * KME, so don't worry that the operations are done
3556 * unlocked, and so the results are occasionally wrong.
3557 */
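 /*
  * Each pass removes roughly 1/freq of every counter, so the
  * statistics decay with a time constant of about one second.
  * poll_round (which cycles through 0..freq-1) is added before the
  * integer division so that small counts eventually decay instead
  * of sticking at a nonzero value.
  */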
3558
3559 nd->nd_read_count -= (nd->nd_read_count +
3560 poll_data->poll_round) / freq;
3561 nd->nd_write_count -= (nd->nd_write_count +
3562 poll_data->poll_round) / freq;
3563 nd->nd_send_count -= (nd->nd_send_count +
3564 poll_data->poll_round) / freq;
3565 nd->nd_tx_byte -= (nd->nd_tx_byte +
3566 poll_data->poll_round) / freq;
3567 nd->nd_rx_byte -= (nd->nd_rx_byte +
3568 poll_data->poll_round) / freq;
3569
3570 /*
3571 * Wake the daemon to transmit data only when there is
3572 * enough byte credit to send data.
3573 *
3574 * The results are approximate because the operations
3575 * are performed unlocked, and we are inspecting
3576 * data asynchronously updated elsewhere. The whole
3577 * thing is just approximation anyway, so that should
3578 * be okay.
3579 */
3580
3581 if (lk->lk_slow_rate >= UIO_MAX) {
3582
3583 nd->nd_delay = 0;
3584 nd->nd_rate = UIO_MAX;
3585
3586 nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
3587 nd->nd_tx_credit = 3 * UIO_MAX;
3588
3589 } else {
3590
3591 long rate;
3592 long delay;
3593 long deposit;
3594 long charge;
3595 long size;
3596 long excess;
3597
3598 long seq_in = nd->nd_seq_in;
3599 long seq_out = nd->nd_seq_out;
3600
3601 /*
3602 * If there are no outstanding packets, run at the
3603 * fastest rate.
3604 */
3605
3606 if (seq_in == seq_out) {
3607 delay = 0;
3608 rate = lk->lk_fast_rate;
3609 }
3610
3611 /*
3612 * Otherwise compute the transmit rate based on the
3613 * delay since the oldest packet.
3614 */
3615
3616 else {
3617 /*
3618 * The actual delay is computed as the
3619 * time since the oldest unacknowledged
3620 * packet was sent, minus the time it
3621 * took to send that packet to the server.
3622 */
3623
3624 delay = ((jiffies - nd->nd_seq_time[seq_out])
3625 - (nd->nd_seq_size[seq_out] /
3626 lk->lk_fast_rate));
3627
3628 /*
3629 * If the delay is less than the "fast"
3630 * delay, transmit full speed. If greater
3631 * than the "slow" delay, transmit at the
3632 * "slow" speed. In between, interpolate
3633 * between the fast and slow speeds.
3634 */
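 /*
  * For example, a delay at or below lk_fast_delay yields
  * lk_fast_rate, a delay at or beyond lk_slow_delay yields
  * lk_slow_rate, and a delay exactly halfway between the two
  * yields the midpoint of the two rates.
  */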
3635
3636 rate =
3637 (delay <= lk->lk_fast_delay ?
3638 lk->lk_fast_rate :
3639 delay >= lk->lk_slow_delay ?
3640 lk->lk_slow_rate :
3641 (lk->lk_slow_rate +
3642 (lk->lk_slow_delay - delay) *
3643 (lk->lk_fast_rate - lk->lk_slow_rate) /
3644 (lk->lk_slow_delay - lk->lk_fast_delay)
3645 )
3646 );
3647 }
3648
3649 nd->nd_delay = delay;
3650 nd->nd_rate = rate;
3651
3652 /*
3653 * Increase the transmit credit by depositing the
3654 * current transmit rate.
3655 */
3656
3657 deposit = nd->nd_tx_deposit;
3658 charge = nd->nd_tx_charge;
3659
3660 deposit += rate;
3661
3662 /*
3663 * If the available transmit credit becomes too large,
3664 * reduce the deposit to correct the value.
3665 *
3666 * Too large is the max of:
3667 * 6 times the header size
3668 * 3 times the current transmit rate.
3669 */
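 /*
  * For example, with a 128 byte header and a rate of 200 bytes per
  * tick the credit is capped at max(2 * 128, 200) * 3 == 768 bytes;
  * any surplus deposit above that cap is discarded.
  */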
3670
3671 size = 2 * nd->nd_link.lk_header_size;
3672
3673 if (size < rate)
3674 size = rate;
3675
3676 size *= 3;
3677
3678 excess = deposit - charge - size;
3679
3680 if (excess > 0)
3681 deposit -= excess;
3682
3683 nd->nd_tx_deposit = deposit;
3684 nd->nd_tx_credit = deposit - charge;
3685
3686 /*
3687 * Wake the transmit task only if the transmit credit
3688 * is at least 3 times the transmit header size.
3689 */
3690
3691 size = 3 * lk->lk_header_size;
3692
3693 if (nd->nd_tx_credit < size)
3694 continue;
3695 }
3696
3697
3698 /*
3699 * Enable the READ select to wake the daemon if there
3700 * is useful work for the drp_read routine to perform.
3701 */
3702
3703 if (waitqueue_active(&nd->nd_tx_waitq) &&
3704 (nd->nd_tx_work != 0 ||
3705 (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
3706 nd->nd_tx_ready = 1;
3707
3708 wake_up_interruptible(&nd->nd_tx_waitq);
3709
3710 /* not needed */
3711 /* nd->nd_flag &= ~ND_SELECT; */
3712 }
3713 }
3714
3715
3716 /*
3717 * Schedule ourself back at the nominal wakeup interval.
3718 */
3719 spin_lock_irqsave(&poll_data->poll_lock, lock_flags);
3720
3721 poll_data->node_active_count--;
3722 if (poll_data->node_active_count > 0) {
3723 poll_data->node_active_count++;
3724 poll_time = poll_data->timer.expires +
3725 poll_data->poll_tick * HZ / 1000;
3726
3727 time = poll_time - jiffies;
3728
3729 if (time >= 2 * poll_data->poll_tick)
3730 poll_time = jiffies + dgrp_poll_tick * HZ / 1000;
3731
3732 poll_data->timer.expires = poll_time;
3733 add_timer(&poll_data->timer);
3734 }
3735
3736 spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);
3737}