drivers/net/wan/z85230.c
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8 * (c) Copyright 2000, 2001 Red Hat Inc
9 *
10 * Development of this driver was funded by Equiinet Ltd
11 * http://www.equiinet.com
12 *
13 * ChangeLog:
14 *
15 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16 * unification of all the Z85x30 asynchronous drivers for real.
17 *
18 * DMA now uses get_free_page as kmalloc buffers may span a 64K
19 * boundary.
20 *
21 * Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
22 *
23 * Performance
24 *
25 * Z85230:
26 * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
27 * X.25 is not unrealistic on all machines. DMA mode can in theory
28 * handle T1/E1 quite nicely. In practice the limit seems to be about
29 * 512Kbit->1Mbit depending on motherboard.
30 *
31 * Z85C30:
32 * 64K will take DMA, 9600 baud X.25 should be ok.
33 *
34 * Z8530:
35 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
36 */
37
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/net.h>
42#include <linux/skbuff.h>
43#include <linux/netdevice.h>
44#include <linux/if_arp.h>
45#include <linux/delay.h>
46#include <linux/ioport.h>
47#include <linux/init.h>
48#include <asm/dma.h>
49#include <asm/io.h>
50#define RT_LOCK
51#define RT_UNLOCK
52#include <linux/spinlock.h>
53
54#include <net/syncppp.h>
55#include "z85230.h"
56
57
58/**
59 * z8530_read_port - Architecture specific interface function
60 * @p: port to read
61 *
62 * Provided port access methods. The Comtrol SV11 requires no delays
63 * between accesses and uses PC I/O. Some drivers may need a 5uS delay.
64 *
65 * In the longer term this should become an architecture specific
66 * section so that this can become a generic driver interface for all
67 * platforms. For now we only handle PC I/O ports with or without the
68 * dread 5uS sanity delay.
69 *
70 * The caller must hold sufficient locks to avoid violating the horrible
71 * 5uS delay rule.
72 */
73
74static inline int z8530_read_port(unsigned long p)
75{
76 u8 r=inb(Z8530_PORT_OF(p));
77 if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
78 udelay(5);
79 return r;
80}
81
82/**
83 * z8530_write_port - Architecture specific interface function
84 * @p: port to write
85 * @d: value to write
86 *
87 * Write a value to a port with delays if need be. Note that the
88 * caller must hold locks to avoid read/writes from other contexts
89 * violating the 5uS rule.
90 *
91 * In the longer term this should become an architecture specific
92 * section so that this can become a generic driver interface for all
93 * platforms. For now we only handle PC I/O ports with or without the
94 * dread 5uS sanity delay.
95 */
96
97
98static inline void z8530_write_port(unsigned long p, u8 d)
99{
100 outb(d,Z8530_PORT_OF(p));
101 if(p&Z8530_PORT_SLEEP)
102 udelay(5);
103}
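
/*
 * Usage sketch (not part of the original source): a card driver that
 * needs the recovery delay simply ORs Z8530_PORT_SLEEP into the port
 * values it hands to this layer; Z8530_PORT_OF() strips it again for
 * the actual inb/outb. The offsets below are illustrative only, the
 * register spacing is board specific.
 *
 *	dev->chanA.ctrlio = (iobase + 1) | Z8530_PORT_SLEEP;
 *	dev->chanA.dataio = (iobase + 3) | Z8530_PORT_SLEEP;
 */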
104
105
106
107static void z8530_rx_done(struct z8530_channel *c);
108static void z8530_tx_done(struct z8530_channel *c);
109
110
111/**
112 * read_zsreg - Read a register from a Z85230
113 * @c: Z8530 channel to read from (2 per chip)
114 * @reg: Register to read
115 * FIXME: Use a spinlock.
116 *
117 * Most of the Z8530 registers are indexed off the control registers.
118 * A read is done by writing to the control register and reading the
119 * register back. The caller must hold the lock.
120 */
121
122static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
123{
124 if(reg)
125 z8530_write_port(c->ctrlio, reg);
126 return z8530_read_port(c->ctrlio);
127}
128
129/**
130 * read_zsdata - Read the data port of a Z8530 channel
131 * @c: The Z8530 channel to read the data port from
132 *
133 * The data port provides fast access to some things. We still
134 * have all the 5uS delays to worry about.
135 */
136
137static inline u8 read_zsdata(struct z8530_channel *c)
138{
139 u8 r;
140 r=z8530_read_port(c->dataio);
141 return r;
142}
143
144/**
145 * write_zsreg - Write to a Z8530 channel register
146 * @c: The Z8530 channel
147 * @reg: Register number
148 * @val: Value to write
149 *
150 * Write a value to an indexed register. The caller must hold the lock
151 * to honour the irritating delay rules. We know about register 0
152 * being fast to access.
153 *
154 * Assumes c->lock is held.
155 */
156static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
157{
158 if(reg)
159 z8530_write_port(c->ctrlio, reg);
160 z8530_write_port(c->ctrlio, val);
161
162}
163
164/**
165 * write_zsctrl - Write to a Z8530 control register
166 * @c: The Z8530 channel
167 * @val: Value to write
168 *
169 * Write directly to the control register on the Z8530
170 */
171
172static inline void write_zsctrl(struct z8530_channel *c, u8 val)
173{
174 z8530_write_port(c->ctrlio, val);
175}
176
177/**
178 * write_zsdata - Write to a Z8530 data register
179 * @c: The Z8530 channel
180 * @val: Value to write
181 *
182 * Write directly to the data register on the Z8530
183 */
184
185
186static inline void write_zsdata(struct z8530_channel *c, u8 val)
187{
188 z8530_write_port(c->dataio, val);
189}
190
191/*
192 * Register loading parameters for a dead port
193 */
194
195u8 z8530_dead_port[]=
196{
197 255
198};
199
200EXPORT_SYMBOL(z8530_dead_port);
201
202/*
203 * Register loading parameters for currently supported circuit types
204 */
205
206
207/*
208 * Data clocked by telco end. This is the correct data for the UK
209 * "kilostream" service, and most other similar services.
210 */
211
212u8 z8530_hdlc_kilostream[]=
213{
214 4, SYNC_ENAB|SDLC|X1CLK,
215 2, 0, /* No vector */
216 1, 0,
217 3, ENT_HM|RxCRC_ENAB|Rx8,
218 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
219 9, 0, /* Disable interrupts */
220 6, 0xFF,
221 7, FLAG,
222 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
223 11, TCTRxCP,
224 14, DISDPLL,
225 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
226 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
227 9, NV|MIE|NORESET,
228 255
229};
230
231EXPORT_SYMBOL(z8530_hdlc_kilostream);
232
233/*
234 * As above but for enhanced chips.
235 */
236
237u8 z8530_hdlc_kilostream_85230[]=
238{
239 4, SYNC_ENAB|SDLC|X1CLK,
240 2, 0, /* No vector */
241 1, 0,
242 3, ENT_HM|RxCRC_ENAB|Rx8,
243 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
244 9, 0, /* Disable interrupts */
245 6, 0xFF,
246 7, FLAG,
247 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
248 11, TCTRxCP,
249 14, DISDPLL,
250 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
251 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
252 9, NV|MIE|NORESET,
253 23, 3, /* Extended mode AUTO TX and EOM*/
254
255 255
256};
257
258EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
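
/*
 * The tables above are flat (register, value) pairs terminated by 255;
 * z8530_channel_load() below applies them in order, and an index of 16
 * or more selects the corresponding "prime" register on enhanced parts.
 * A minimal private table for a hypothetical board could look something
 * like this (illustrative only):
 *
 *	static u8 my_board_setup[] = {
 *		4, SYNC_ENAB | SDLC | X1CLK,
 *		3, ENT_HM | RxCRC_ENAB | Rx8,
 *		5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
 *		255
 *	};
 */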
259
260/**
261 * z8530_flush_fifo - Flush on chip RX FIFO
262 * @c: Channel to flush
263 *
264 * Flush the receive FIFO. There is no specific option for this, we
265 * blindly read bytes and discard them. Reading when there is no data
266 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
267 *
268 * All locking is handled for the caller. On return data may still be
269 * present if it arrived during the flush.
270 */
271
272static void z8530_flush_fifo(struct z8530_channel *c)
273{
274 read_zsreg(c, R1);
275 read_zsreg(c, R1);
276 read_zsreg(c, R1);
277 read_zsreg(c, R1);
278 if(c->dev->type==Z85230)
279 {
280 read_zsreg(c, R1);
281 read_zsreg(c, R1);
282 read_zsreg(c, R1);
283 read_zsreg(c, R1);
284 }
285}
286
287/**
288 * z8530_rtsdtr - Control the outgoing DTR/RTS line
289 * @c: The Z8530 channel to control
290 * @set: 1 to set, 0 to clear
291 *
292 * Sets or clears DTR/RTS on the requested line. All locking is handled
293 * by the caller. For now we assume all boards use the actual RTS/DTR
294 * on the chip. Apparently one or two don't. We'll scream about them
295 * later.
296 */
297
298static void z8530_rtsdtr(struct z8530_channel *c, int set)
299{
300 if (set)
301 c->regs[5] |= (RTS | DTR);
302 else
303 c->regs[5] &= ~(RTS | DTR);
304 write_zsreg(c, R5, c->regs[5]);
305}
306
307/**
308 * z8530_rx - Handle a PIO receive event
309 * @c: Z8530 channel to process
310 *
311 * Receive handler for receiving in PIO mode. This is much like the
312 * async one but not quite the same or as complex.
313 *
314 * Note: It's intended that this handler can easily be separated from
315 * the main code to run realtime. That'll be needed for some machines
316 * (eg to ever clock 64kbits on a sparc ;)).
317 *
318 * The RT_LOCK macros don't do anything now. Keep the code covered
319 * by them as short as possible in all circumstances - clocks cost
320 * baud. The interrupt handler is assumed to be atomic w.r.t.
321 * other code - this is true in the RT case too.
322 *
323 * We only cover the sync cases for this. If you want 2Mbit async
324 * do it yourself but consider medical assistance first. This non DMA
325 * synchronous mode is portable code. The DMA mode assumes PCI like
326 * ISA DMA
327 *
328 * Called with the device lock held
329 */
330
331static void z8530_rx(struct z8530_channel *c)
332{
333 u8 ch,stat;
334
335 while(1)
336 {
337 /* FIFO empty ? */
338 if(!(read_zsreg(c, R0)&1))
339 break;
340 ch=read_zsdata(c);
341 stat=read_zsreg(c, R1);
342
343 /*
344 * Overrun ?
345 */
346 if(c->count < c->max)
347 {
348 *c->dptr++=ch;
349 c->count++;
350 }
351
352 if(stat&END_FR)
353 {
354
355 /*
356 * Error ?
357 */
358 if(stat&(Rx_OVR|CRC_ERR))
359 {
360 /* Rewind the buffer and return */
361 if(c->skb)
362 c->dptr=c->skb->data;
363 c->count=0;
364 if(stat&Rx_OVR)
365 {
366 printk(KERN_WARNING "%s: overrun\n", c->dev->name);
367 c->rx_overrun++;
368 }
369 if(stat&CRC_ERR)
370 {
371 c->rx_crc_err++;
372 /* printk("crc error\n"); */
373 }
374 /* Shove the frame upstream */
375 }
376 else
377 {
378 /*
379 * Drop the lock for RX processing, or
380 * there are deadlocks
381 */
382 z8530_rx_done(c);
383 write_zsctrl(c, RES_Rx_CRC);
384 }
385 }
386 }
387 /*
388 * Clear irq
389 */
390 write_zsctrl(c, ERR_RES);
391 write_zsctrl(c, RES_H_IUS);
392}
393
394
395/**
396 * z8530_tx - Handle a PIO transmit event
397 * @c: Z8530 channel to process
398 *
399 * Z8530 transmit interrupt handler for the PIO mode. The basic
400 * idea is to attempt to keep the FIFO fed. We fill as many bytes
401 * in as possible; it's quite possible that we won't keep up with the
402 * data rate otherwise.
403 */
404
405static void z8530_tx(struct z8530_channel *c)
406{
407 while(c->txcount) {
408 /* FIFO full ? */
409 if(!(read_zsreg(c, R0)&4))
410 return;
411 c->txcount--;
412 /*
413 * Shovel out the byte
414 */
415 write_zsreg(c, R8, *c->tx_ptr++);
416 write_zsctrl(c, RES_H_IUS);
417 /* We are about to underflow */
418 if(c->txcount==0)
419 {
420 write_zsctrl(c, RES_EOM_L);
421 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
422 }
423 }
424
425
426 /*
427 * End of frame TX - fire another one
428 */
429
430 write_zsctrl(c, RES_Tx_P);
431
432 z8530_tx_done(c);
433 write_zsctrl(c, RES_H_IUS);
434}
435
436/**
437 * z8530_status - Handle a PIO status exception
438 * @chan: Z8530 channel to process
439 *
440 * A status event occurred in PIO synchronous mode. There are several
441 * reasons the chip will bother us here. A transmit underrun means we
442 * failed to feed the chip fast enough and just broke a packet. A DCD
443 * change is a line up or down. We communicate that back to the protocol
444 * layer for synchronous PPP to renegotiate.
445 */
446
447static void z8530_status(struct z8530_channel *chan)
448{
449 u8 status, altered;
450
451 status=read_zsreg(chan, R0);
452 altered=chan->status^status;
453
454 chan->status=status;
455
456 if(status&TxEOM)
457 {
458/* printk("%s: Tx underrun.\n", chan->dev->name); */
459 chan->stats.tx_fifo_errors++;
460 write_zsctrl(chan, ERR_RES);
461 z8530_tx_done(chan);
462 }
463
464 if(altered&chan->dcdcheck)
465 {
466 if(status&chan->dcdcheck)
467 {
468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
469 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
470 if(chan->netdevice &&
471 ((chan->netdevice->type == ARPHRD_HDLC) ||
472 (chan->netdevice->type == ARPHRD_PPP)))
473 sppp_reopen(chan->netdevice);
474 }
475 else
476 {
477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
478 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
479 z8530_flush_fifo(chan);
480 }
481
482 }
483 write_zsctrl(chan, RES_EXT_INT);
484 write_zsctrl(chan, RES_H_IUS);
485}
486
487struct z8530_irqhandler z8530_sync=
488{
489 z8530_rx,
490 z8530_tx,
491 z8530_status
492};
493
494EXPORT_SYMBOL(z8530_sync);
495
496/**
497 * z8530_dma_rx - Handle a DMA RX event
498 * @chan: Channel to handle
499 *
500 * Non bus mastering DMA interfaces for the Z8x30 devices. This
501 * is really pretty PC specific. The DMA mode means that most receive
502 * events are handled by the DMA hardware. We get a kick here only if
503 * a frame ended.
504 */
505
506static void z8530_dma_rx(struct z8530_channel *chan)
507{
508 if(chan->rxdma_on)
509 {
510 /* Special condition check only */
511 u8 status;
512
513 read_zsreg(chan, R7);
514 read_zsreg(chan, R6);
515
516 status=read_zsreg(chan, R1);
517
518 if(status&END_FR)
519 {
520 z8530_rx_done(chan); /* Fire up the next one */
521 }
522 write_zsctrl(chan, ERR_RES);
523 write_zsctrl(chan, RES_H_IUS);
524 }
525 else
526 {
527 /* DMA is off right now, drain the slow way */
528 z8530_rx(chan);
529 }
530}
531
532/**
533 * z8530_dma_tx - Handle a DMA TX event
534 * @chan: The Z8530 channel to handle
535 *
536 * We have received an interrupt while doing DMA transmissions. It
537 * shouldn't happen. Scream loudly if it does.
538 */
539
540static void z8530_dma_tx(struct z8530_channel *chan)
541{
542 if(!chan->dma_tx)
543 {
544 printk(KERN_WARNING "Hey who turned the DMA off?\n");
545 z8530_tx(chan);
546 return;
547 }
548 /* This shouldn't occur in DMA mode */
549 printk(KERN_ERR "DMA tx - bogus event!\n");
550 z8530_tx(chan);
551}
552
553/**
554 * z8530_dma_status - Handle a DMA status exception
555 * @chan: Z8530 channel to process
556 *
557 * A status event occurred on the Z8530. We receive these for two reasons
558 * when in DMA mode. Firstly if we finished a packet transfer we get one
559 * and kick the next packet out. Secondly we may see a DCD change and
560 * have to poke the protocol layer.
561 *
562 */
563
564static void z8530_dma_status(struct z8530_channel *chan)
565{
566 u8 status, altered;
567
568 status=read_zsreg(chan, R0);
569 altered=chan->status^status;
570
571 chan->status=status;
572
573
574 if(chan->dma_tx)
575 {
576 if(status&TxEOM)
577 {
578 unsigned long flags;
579
580 flags=claim_dma_lock();
581 disable_dma(chan->txdma);
582 clear_dma_ff(chan->txdma);
583 chan->txdma_on=0;
584 release_dma_lock(flags);
585 z8530_tx_done(chan);
586 }
587 }
588
589 if(altered&chan->dcdcheck)
590 {
591 if(status&chan->dcdcheck)
592 {
593 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
594 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
595 if(chan->netdevice &&
596 ((chan->netdevice->type == ARPHRD_HDLC) ||
597 (chan->netdevice->type == ARPHRD_PPP)))
598 sppp_reopen(chan->netdevice);
599 }
600 else
601 {
602 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
603 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
604 z8530_flush_fifo(chan);
605 }
606 }
607
608 write_zsctrl(chan, RES_EXT_INT);
609 write_zsctrl(chan, RES_H_IUS);
610}
611
612struct z8530_irqhandler z8530_dma_sync=
613{
614 z8530_dma_rx,
615 z8530_dma_tx,
616 z8530_dma_status
617};
618
619EXPORT_SYMBOL(z8530_dma_sync);
620
621struct z8530_irqhandler z8530_txdma_sync=
622{
623 z8530_rx,
624 z8530_dma_tx,
625 z8530_dma_status
626};
627
628EXPORT_SYMBOL(z8530_txdma_sync);
629
630/**
631 * z8530_rx_clear - Handle RX events from a stopped chip
632 * @c: Z8530 channel to shut up
633 *
634 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
635 * For machines with PCI Z85x30 cards, or level triggered interrupts
636 * (eg the MacII) we must clear the interrupt cause or die.
637 */
638
639
640static void z8530_rx_clear(struct z8530_channel *c)
641{
642 /*
643 * Data and status bytes
644 */
645 u8 stat;
646
647 read_zsdata(c);
648 stat=read_zsreg(c, R1);
649
650 if(stat&END_FR)
651 write_zsctrl(c, RES_Rx_CRC);
652 /*
653 * Clear irq
654 */
655 write_zsctrl(c, ERR_RES);
656 write_zsctrl(c, RES_H_IUS);
657}
658
659/**
660 * z8530_tx_clear - Handle TX events from a stopped chip
661 * @c: Z8530 channel to shut up
662 *
663 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
664 * For machines with PCI Z85x30 cards, or level triggered interrupts
665 * (eg the MacII) we must clear the interrupt cause or die.
666 */
667
668static void z8530_tx_clear(struct z8530_channel *c)
669{
670 write_zsctrl(c, RES_Tx_P);
671 write_zsctrl(c, RES_H_IUS);
672}
673
674/**
675 * z8530_status_clear - Handle status events from a stopped chip
676 * @chan: Z8530 channel to shut up
677 *
678 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
679 * For machines with PCI Z85x30 cards, or level triggered interrupts
680 * (eg the MacII) we must clear the interrupt cause or die.
681 */
682
683static void z8530_status_clear(struct z8530_channel *chan)
684{
685 u8 status=read_zsreg(chan, R0);
686 if(status&TxEOM)
687 write_zsctrl(chan, ERR_RES);
688 write_zsctrl(chan, RES_EXT_INT);
689 write_zsctrl(chan, RES_H_IUS);
690}
691
692struct z8530_irqhandler z8530_nop=
693{
694 z8530_rx_clear,
695 z8530_tx_clear,
696 z8530_status_clear
697};
698
699
700EXPORT_SYMBOL(z8530_nop);
701
702/**
703 * z8530_interrupt - Handle an interrupt from a Z8530
704 * @irq: Interrupt number
705 * @dev_id: The Z8530 device that is interrupting.
706 * @regs: unused
707 *
708 * A Z85[2]30 device has stuck its hand in the air for attention.
709 * We scan both the channels on the chip for events and then call
710 * the channel specific call backs for each channel that has events.
711 * We have to use callback functions because the two channels can be
712 * in different modes.
713 *
714 * Locking is done for the handlers. Note that locking is done
715 * at the chip level (the 5uS delay issue is per chip not per
716 * channel). c->lock for both channels points to dev->lock.
717 */
718
719irqreturn_t z8530_interrupt(int irq, void *dev_id)
720{
721 struct z8530_dev *dev=dev_id;
722 u8 intr;
723 static volatile int locker=0;
724 int work=0;
725 struct z8530_irqhandler *irqs;
726
727 if(locker)
728 {
729 printk(KERN_ERR "IRQ re-enter\n");
730 return IRQ_NONE;
731 }
732 locker=1;
733
734 spin_lock(&dev->lock);
735
736 while(++work<5000)
737 {
738
739 intr = read_zsreg(&dev->chanA, R3);
740 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
741 break;
742
743 /* This holds the IRQ status. On the 8530 you must read it from chan
744 A even though it applies to the whole chip */
745
746 /* Now walk the chip and see what it is wanting - it may be
747 an IRQ for someone else remember */
748
749 irqs=dev->chanA.irqs;
750
751 if(intr & (CHARxIP|CHATxIP|CHAEXT))
752 {
753 if(intr&CHARxIP)
754 irqs->rx(&dev->chanA);
755 if(intr&CHATxIP)
756 irqs->tx(&dev->chanA);
757 if(intr&CHAEXT)
758 irqs->status(&dev->chanA);
759 }
760
761 irqs=dev->chanB.irqs;
762
763 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
764 {
765 if(intr&CHBRxIP)
766 irqs->rx(&dev->chanB);
767 if(intr&CHBTxIP)
768 irqs->tx(&dev->chanB);
769 if(intr&CHBEXT)
770 irqs->status(&dev->chanB);
771 }
772 }
773 spin_unlock(&dev->lock);
774 if(work==5000)
775 printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
776 /* Ok all done */
777 locker=0;
778 return IRQ_HANDLED;
779}
780
781EXPORT_SYMBOL(z8530_interrupt);
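
/*
 * Hookup sketch (not from the original driver): a card driver registers
 * this handler with the usual request_irq() call and passes its struct
 * z8530_dev as the cookie. The name string and the IRQF_SHARED flag
 * below are illustrative choices.
 *
 *	if (request_irq(dev->irq, z8530_interrupt, IRQF_SHARED,
 *			"z85230", dev)) {
 *		printk(KERN_ERR "z85230: IRQ %d busy\n", dev->irq);
 *		return -EBUSY;
 *	}
 */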
782
783static char reg_init[16]=
784{
785 0,0,0,0,
786 0,0,0,0,
787 0,0,0,0,
788 0x55,0,0,0
789};
790
791
792/**
793 * z8530_sync_open - Open a Z8530 channel for PIO
794 * @dev: The network interface we are using
795 * @c: The Z8530 channel to open in synchronous PIO mode
796 *
797 * Switch a Z8530 into synchronous mode without DMA assist. We
798 * raise the RTS/DTR and commence network operation.
799 */
800
801int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
802{
803 unsigned long flags;
804
805 spin_lock_irqsave(c->lock, flags);
806
807 c->sync = 1;
808 c->mtu = dev->mtu+64;
809 c->count = 0;
810 c->skb = NULL;
811 c->skb2 = NULL;
812 c->irqs = &z8530_sync;
813
814 /* This loads the double buffer up */
815 z8530_rx_done(c); /* Load the frame ring */
816 z8530_rx_done(c); /* Load the backup frame */
817 z8530_rtsdtr(c,1);
818 c->dma_tx = 0;
819 c->regs[R1]|=TxINT_ENAB;
820 write_zsreg(c, R1, c->regs[R1]);
821 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
822
823 spin_unlock_irqrestore(c->lock, flags);
824 return 0;
825}
826
827
828EXPORT_SYMBOL(z8530_sync_open);
829
830/**
831 * z8530_sync_close - Close a PIO Z8530 channel
832 * @dev: Network device to close
833 * @c: Z8530 channel to disassociate and move to idle
834 *
835 * Close down a Z8530 interface and switch its interrupt handlers
836 * to discard future events.
837 */
838
839int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
840{
841 u8 chk;
842 unsigned long flags;
843
844 spin_lock_irqsave(c->lock, flags);
845 c->irqs = &z8530_nop;
846 c->max = 0;
847 c->sync = 0;
848
849 chk=read_zsreg(c,R0);
850 write_zsreg(c, R3, c->regs[R3]);
851 z8530_rtsdtr(c,0);
852
853 spin_unlock_irqrestore(c->lock, flags);
854 return 0;
855}
856
857EXPORT_SYMBOL(z8530_sync_close);
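
/*
 * Usage sketch, assuming a hypothetical card driver: the PIO entry
 * points above are normally wired into the net_device open/stop methods
 * of the board driver. my_channel() below stands in for however that
 * driver locates its z8530_channel.
 *
 *	static int my_open(struct net_device *d)
 *	{
 *		struct z8530_channel *ch = my_channel(d);
 *		z8530_sync_open(d, ch);
 *		z8530_channel_load(ch, z8530_hdlc_kilostream);
 *		netif_start_queue(d);
 *		return 0;
 *	}
 *
 *	static int my_close(struct net_device *d)
 *	{
 *		struct z8530_channel *ch = my_channel(d);
 *		netif_stop_queue(d);
 *		z8530_sync_close(d, ch);
 *		return 0;
 *	}
 */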
858
859/**
860 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
861 * @dev: The network device to attach
862 * @c: The Z8530 channel to configure in sync DMA mode.
863 *
864 * Set up a Z85x30 device for synchronous DMA in both directions. Two
865 * ISA DMA channels must be available for this to work. We assume ISA
866 * DMA driven I/O and PC limits on access.
867 */
868
869int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
870{
871 unsigned long cflags, dflags;
872
873 c->sync = 1;
874 c->mtu = dev->mtu+64;
875 c->count = 0;
876 c->skb = NULL;
877 c->skb2 = NULL;
878 /*
879 * Load the DMA interfaces up
880 */
881 c->rxdma_on = 0;
882 c->txdma_on = 0;
883
884 /*
885 * Allocate the DMA flip buffers. Limit by page size.
886 * Everyone runs 1500 mtu or less on wan links so this
887 * should be fine.
888 */
889
890 if(c->mtu > PAGE_SIZE/2)
891 return -EMSGSIZE;
892
893 c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
894 if(c->rx_buf[0]==NULL)
895 return -ENOBUFS;
896 c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
897
898 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
899 if(c->tx_dma_buf[0]==NULL)
900 {
901 free_page((unsigned long)c->rx_buf[0]);
902 c->rx_buf[0]=NULL;
903 return -ENOBUFS;
904 }
905 c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
906
907 c->tx_dma_used=0;
908 c->dma_tx = 1;
909 c->dma_num=0;
910 c->dma_ready=1;
911
912 /*
913 * Enable DMA control mode
914 */
915
916 spin_lock_irqsave(c->lock, cflags);
917
918 /*
919 * TX DMA via DIR/REQ
920 */
921
922 c->regs[R14]|= DTRREQ;
923 write_zsreg(c, R14, c->regs[R14]);
924
925 c->regs[R1]&= ~TxINT_ENAB;
926 write_zsreg(c, R1, c->regs[R1]);
927
928 /*
929 * RX DMA via W/Req
930 */
931
932 c->regs[R1]|= WT_FN_RDYFN;
933 c->regs[R1]|= WT_RDY_RT;
934 c->regs[R1]|= INT_ERR_Rx;
935 c->regs[R1]&= ~TxINT_ENAB;
936 write_zsreg(c, R1, c->regs[R1]);
937 c->regs[R1]|= WT_RDY_ENAB;
938 write_zsreg(c, R1, c->regs[R1]);
939
940 /*
941 * DMA interrupts
942 */
943
944 /*
945 * Set up the DMA configuration
946 */
947
948 dflags=claim_dma_lock();
949
950 disable_dma(c->rxdma);
951 clear_dma_ff(c->rxdma);
952 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
953 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
954 set_dma_count(c->rxdma, c->mtu);
955 enable_dma(c->rxdma);
956
957 disable_dma(c->txdma);
958 clear_dma_ff(c->txdma);
959 set_dma_mode(c->txdma, DMA_MODE_WRITE);
960 disable_dma(c->txdma);
961
962 release_dma_lock(dflags);
963
964 /*
965 * Select the DMA interrupt handlers
966 */
967
968 c->rxdma_on = 1;
969 c->txdma_on = 1;
970 c->tx_dma_used = 1;
971
972 c->irqs = &z8530_dma_sync;
973 z8530_rtsdtr(c,1);
974 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
975
976 spin_unlock_irqrestore(c->lock, cflags);
977
978 return 0;
979}
980
981EXPORT_SYMBOL(z8530_sync_dma_open);
982
983/**
984 * z8530_sync_dma_close - Close down DMA I/O
985 * @dev: Network device to detach
986 * @c: Z8530 channel to move into discard mode
987 *
988 * Shut down a DMA mode synchronous interface. Halt the DMA, and
989 * free the buffers.
990 */
991
992int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
993{
994 u8 chk;
995 unsigned long flags;
996
997 c->irqs = &z8530_nop;
998 c->max = 0;
999 c->sync = 0;
1000
1001 /*
1002 * Disable the PC DMA channels
1003 */
1004
1005 flags=claim_dma_lock();
1006 disable_dma(c->rxdma);
1007 clear_dma_ff(c->rxdma);
1008
1009 c->rxdma_on = 0;
1010
1011 disable_dma(c->txdma);
1012 clear_dma_ff(c->txdma);
1013 release_dma_lock(flags);
1014
1015 c->txdma_on = 0;
1016 c->tx_dma_used = 0;
1017
1018 spin_lock_irqsave(c->lock, flags);
1019
1020 /*
1021 * Disable DMA control mode
1022 */
1023
1024 c->regs[R1]&= ~WT_RDY_ENAB;
1025 write_zsreg(c, R1, c->regs[R1]);
1026 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1027 c->regs[R1]|= INT_ALL_Rx;
1028 write_zsreg(c, R1, c->regs[R1]);
1029 c->regs[R14]&= ~DTRREQ;
1030 write_zsreg(c, R14, c->regs[R14]);
1031
1032 if(c->rx_buf[0])
1033 {
1034 free_page((unsigned long)c->rx_buf[0]);
1035 c->rx_buf[0]=NULL;
1036 }
1037 if(c->tx_dma_buf[0])
1038 {
1039 free_page((unsigned long)c->tx_dma_buf[0]);
1040 c->tx_dma_buf[0]=NULL;
1041 }
1042 chk=read_zsreg(c,R0);
1043 write_zsreg(c, R3, c->regs[R3]);
1044 z8530_rtsdtr(c,0);
1045
1046 spin_unlock_irqrestore(c->lock, flags);
1047
1048 return 0;
1049}
1050
1051EXPORT_SYMBOL(z8530_sync_dma_close);
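
/*
 * Prerequisite sketch: before calling z8530_sync_dma_open() the board
 * driver must have claimed the two ISA DMA channels and stored them in
 * the channel structure, roughly as below (names illustrative, error
 * unwinding omitted).
 *
 *	if (request_dma(rxdma, "z85230 rx") ||
 *	    request_dma(txdma, "z85230 tx"))
 *		goto dma_busy;
 *	dev->chanA.rxdma = rxdma;
 *	dev->chanA.txdma = txdma;
 */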
1052
1053/**
1054 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1055 * @dev: The network device to attach
1056 * @c: The Z8530 channel to configure in sync DMA mode.
1057 *
1058 * Set up a Z85x30 device for synchronous DMA transmission. One
1059 * ISA DMA channel must be available for this to work. The receive
1060 * side is run in PIO mode, but then it has the bigger FIFO.
1061 */
1062
1063int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1064{
1065 unsigned long cflags, dflags;
1066
1067 printk(KERN_INFO "Opening sync interface for TX-DMA\n");
1068 c->sync = 1;
1069 c->mtu = dev->mtu+64;
1070 c->count = 0;
1071 c->skb = NULL;
1072 c->skb2 = NULL;
1073
1074 /*
1075 * Allocate the DMA flip buffers. Limit by page size.
1076 * Everyone runs 1500 mtu or less on wan links so this
1077 * should be fine.
1078 */
1079
1080 if(c->mtu > PAGE_SIZE/2)
1081 return -EMSGSIZE;
1082
1083 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1084 if(c->tx_dma_buf[0]==NULL)
1085 return -ENOBUFS;
1086
1087 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1088
1089
1090 spin_lock_irqsave(c->lock, cflags);
1091
1092 /*
1093 * Load the PIO receive ring
1094 */
1095
1096 z8530_rx_done(c);
1097 z8530_rx_done(c);
1098
1099 /*
1100 * Load the DMA interfaces up
1101 */
1102
1103 c->rxdma_on = 0;
1104 c->txdma_on = 0;
1105
1106 c->tx_dma_used=0;
1107 c->dma_num=0;
1108 c->dma_ready=1;
1109 c->dma_tx = 1;
1110
1111 /*
1112 * Enable DMA control mode
1113 */
1114
1115 /*
1116 * TX DMA via DIR/REQ
1117 */
1118 c->regs[R14]|= DTRREQ;
1119 write_zsreg(c, R14, c->regs[R14]);
1120
1121 c->regs[R1]&= ~TxINT_ENAB;
1122 write_zsreg(c, R1, c->regs[R1]);
1123
1124 /*
1125 * Set up the DMA configuration
1126 */
1127
1128 dflags = claim_dma_lock();
1129
1130 disable_dma(c->txdma);
1131 clear_dma_ff(c->txdma);
1132 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1133 disable_dma(c->txdma);
1134
1135 release_dma_lock(dflags);
1136
1137 /*
1138 * Select the DMA interrupt handlers
1139 */
1140
1141 c->rxdma_on = 0;
1142 c->txdma_on = 1;
1143 c->tx_dma_used = 1;
1144
1145 c->irqs = &z8530_txdma_sync;
1146 z8530_rtsdtr(c,1);
1147 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1148 spin_unlock_irqrestore(c->lock, cflags);
1149
1150 return 0;
1151}
1152
1153EXPORT_SYMBOL(z8530_sync_txdma_open);
1154
1155/**
1156 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1157 * @dev: Network device to detach
1158 * @c: Z8530 channel to move into discard mode
1159 *
1160 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1161 * and free the buffers.
1162 */
1163
1164int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1165{
1166 unsigned long dflags, cflags;
1167 u8 chk;
1168
1169
1170 spin_lock_irqsave(c->lock, cflags);
1171
1172 c->irqs = &z8530_nop;
1173 c->max = 0;
1174 c->sync = 0;
1175
1176 /*
1177 * Disable the PC DMA channels
1178 */
1179
1180 dflags = claim_dma_lock();
1181
1182 disable_dma(c->txdma);
1183 clear_dma_ff(c->txdma);
1184 c->txdma_on = 0;
1185 c->tx_dma_used = 0;
1186
1187 release_dma_lock(dflags);
1188
1189 /*
1190 * Disable DMA control mode
1191 */
1192
1193 c->regs[R1]&= ~WT_RDY_ENAB;
1194 write_zsreg(c, R1, c->regs[R1]);
1195 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1196 c->regs[R1]|= INT_ALL_Rx;
1197 write_zsreg(c, R1, c->regs[R1]);
1198 c->regs[R14]&= ~DTRREQ;
1199 write_zsreg(c, R14, c->regs[R14]);
1200
1201 if(c->tx_dma_buf[0])
1202 {
1203 free_page((unsigned long)c->tx_dma_buf[0]);
1204 c->tx_dma_buf[0]=NULL;
1205 }
1206 chk=read_zsreg(c,R0);
1207 write_zsreg(c, R3, c->regs[R3]);
1208 z8530_rtsdtr(c,0);
1209
1210 spin_unlock_irqrestore(c->lock, cflags);
1211 return 0;
1212}
1213
1214
1215EXPORT_SYMBOL(z8530_sync_txdma_close);
1216
1217
1218/*
1219 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1220 * it exists...
1221 */
1222
1223static char *z8530_type_name[]={
1224 "Z8530",
1225 "Z85C30",
1226 "Z85230"
1227};
1228
1229/**
1230 * z8530_describe - Uniformly describe a Z8530 port
1231 * @dev: Z8530 device to describe
1232 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1233 * @io: the port value in question
1234 *
1235 * Describe a Z8530 in a standard format. We must pass the I/O as
1236 * the port offset isn't predictable. The main reason for this function
1237 * is to try and get a common format of report.
1238 */
1239
1240void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1241{
1242 printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
1243 dev->name,
1244 z8530_type_name[dev->type],
1245 mapping,
1246 Z8530_PORT_OF(io),
1247 dev->irq);
1248}
1249
1250EXPORT_SYMBOL(z8530_describe);
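
/*
 * Typical call, as a board driver would make it after a successful
 * probe (iobase being whatever the card was found at):
 *
 *	z8530_describe(dev, "I/O", iobase);
 */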
1251
1252/*
1253 * Locked operation part of the z8530 init code
1254 */
1255
1256static inline int do_z8530_init(struct z8530_dev *dev)
1257{
1258 /* NOP the interrupt handlers first - we might get a
1259 floating IRQ transition when we reset the chip */
1260 dev->chanA.irqs=&z8530_nop;
1261 dev->chanB.irqs=&z8530_nop;
1262 dev->chanA.dcdcheck=DCD;
1263 dev->chanB.dcdcheck=DCD;
1264
1265 /* Reset the chip */
1266 write_zsreg(&dev->chanA, R9, 0xC0);
1267 udelay(200);
1268 /* Now check it's valid */
1269 write_zsreg(&dev->chanA, R12, 0xAA);
1270 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1271 return -ENODEV;
1272 write_zsreg(&dev->chanA, R12, 0x55);
1273 if(read_zsreg(&dev->chanA, R12)!=0x55)
1274 return -ENODEV;
1275
1276 dev->type=Z8530;
1277
1278 /*
1279 * See the application note.
1280 */
1281
1282 write_zsreg(&dev->chanA, R15, 0x01);
1283
1284 /*
1285 * If we can set the low bit of R15 then
1286 * the chip is enhanced.
1287 */
1288
1289 if(read_zsreg(&dev->chanA, R15)==0x01)
1290 {
1291 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1292 /* Put a char in the fifo */
1293 write_zsreg(&dev->chanA, R8, 0);
1294 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1295 dev->type = Z85230; /* Has a FIFO */
1296 else
1297 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1298 }
1299
1300 /*
1301 * The code assumes R7' and friends are
1302 * off. Use write_zsext() for these and keep
1303 * this bit clear.
1304 */
1305
1306 write_zsreg(&dev->chanA, R15, 0);
1307
1308 /*
1309 * At this point it looks like the chip is behaving
1310 */
1311
1312 memcpy(dev->chanA.regs, reg_init, 16);
1313 memcpy(dev->chanB.regs, reg_init ,16);
1314
1315 return 0;
1316}
1317
1318/**
1319 * z8530_init - Initialise a Z8530 device
1320 * @dev: Z8530 device to initialise.
1321 *
1322 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1323 * is present, identify the type and then program it to hopefully
1324 * keep quiet and behave. This matters a lot; a Z8530 in the wrong
1325 * state will sometimes get into stupid modes generating 10kHz
1326 * interrupt streams and the like.
1327 *
1328 * We set the interrupt handler up to discard any events, in case
1329 * we get them during reset or setup.
1330 *
1331 * Return 0 for success, or a negative value indicating the problem
1332 * in errno form.
1333 */
1334
1335int z8530_init(struct z8530_dev *dev)
1336{
1337 unsigned long flags;
1338 int ret;
1339
1340 /* Set up the chip level lock */
1341 spin_lock_init(&dev->lock);
1342 dev->chanA.lock = &dev->lock;
1343 dev->chanB.lock = &dev->lock;
1344
1345 spin_lock_irqsave(&dev->lock, flags);
1346 ret = do_z8530_init(dev);
1347 spin_unlock_irqrestore(&dev->lock, flags);
1348
1349 return ret;
1350}
1351
1352
1353EXPORT_SYMBOL(z8530_init);
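
/*
 * Probe sketch, offsets illustrative: a board driver fills in the
 * channel port addresses and the IRQ, then lets this core validate and
 * identify the chip before doing anything else with it.
 *
 *	dev->chanA.ctrlio = iobase + 1;
 *	dev->chanA.dataio = iobase + 3;
 *	dev->chanB.ctrlio = iobase;
 *	dev->chanB.dataio = iobase + 2;
 *	dev->irq = irq;
 *	if (z8530_init(dev)) {
 *		printk(KERN_ERR "z85230: chip not responding\n");
 *		goto fail;
 *	}
 *
 * z8530_shutdown() below is the matching teardown call.
 */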
1354
1355/**
1356 * z8530_shutdown - Shutdown a Z8530 device
1357 * @dev: The Z8530 chip to shutdown
1358 *
1359 * We set the interrupt handlers to silence any interrupts. We then
1360 * reset the chip and wait 100uS to be sure the reset completed. Just
1361 * in case the caller then tries to do stuff.
1362 *
1363 * This is called without the lock held
1364 */
1365
1366int z8530_shutdown(struct z8530_dev *dev)
1367{
1368 unsigned long flags;
1369 /* Reset the chip */
1370
1371 spin_lock_irqsave(&dev->lock, flags);
1372 dev->chanA.irqs=&z8530_nop;
1373 dev->chanB.irqs=&z8530_nop;
1374 write_zsreg(&dev->chanA, R9, 0xC0);
1375 /* We must lock the udelay, the chip is offlimits here */
1376 udelay(100);
1377 spin_unlock_irqrestore(&dev->lock, flags);
1378 return 0;
1379}
1380
1381EXPORT_SYMBOL(z8530_shutdown);
1382
1383/**
1384 * z8530_channel_load - Load channel data
1385 * @c: Z8530 channel to configure
1386 * @rtable: table of register, value pairs
1387 * FIXME: ioctl to allow user uploaded tables
1388 *
1389 * Load a Z8530 channel up from the system data. We use +16 to
1390 * indicate the "prime" registers. The value 255 terminates the
1391 * table.
1392 */
1393
1394int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1395{
1396 unsigned long flags;
1397
1398 spin_lock_irqsave(c->lock, flags);
1399
1400 while(*rtable!=255)
1401 {
1402 int reg=*rtable++;
1403 if(reg>0x0F)
1404 write_zsreg(c, R15, c->regs[15]|1);
1405 write_zsreg(c, reg&0x0F, *rtable);
1406 if(reg>0x0F)
1407 write_zsreg(c, R15, c->regs[15]&~1);
1408 c->regs[reg]=*rtable++;
1409 }
1410 c->rx_function=z8530_null_rx;
1411 c->skb=NULL;
1412 c->tx_skb=NULL;
1413 c->tx_next_skb=NULL;
1414 c->mtu=1500;
1415 c->max=0;
1416 c->count=0;
1417 c->status=read_zsreg(c, R0);
1418 c->sync=1;
1419 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1420
1421 spin_unlock_irqrestore(c->lock, flags);
1422 return 0;
1423}
1424
1425EXPORT_SYMBOL(z8530_channel_load);
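
/*
 * A card driver would typically pick a table based on the chip type
 * that z8530_init() detected, for example:
 *
 *	if (dev->type == Z85230)
 *		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
 *	else
 *		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
 */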
1426
1427
1428/**
1429 * z8530_tx_begin - Begin packet transmission
1430 * @c: The Z8530 channel to kick
1431 *
1432 * This is the speed sensitive side of transmission. If we are called
1433 * and no buffer is being transmitted we commence the next buffer. If
1434 * nothing is queued we idle the sync.
1435 *
1436 * Note: We are handling this code path in the interrupt path, keep it
1437 * fast or bad things will happen.
1438 *
1439 * Called with the lock held.
1440 */
1441
1442static void z8530_tx_begin(struct z8530_channel *c)
1443{
1444 unsigned long flags;
1445 if(c->tx_skb)
1446 return;
1447
1448 c->tx_skb=c->tx_next_skb;
1449 c->tx_next_skb=NULL;
1450 c->tx_ptr=c->tx_next_ptr;
1451
1452 if(c->tx_skb==NULL)
1453 {
1454 /* Idle on */
1455 if(c->dma_tx)
1456 {
1457 flags=claim_dma_lock();
1458 disable_dma(c->txdma);
1459 /*
1460 * Check if we crapped out.
1461 */
1462 if(get_dma_residue(c->txdma))
1463 {
1464 c->stats.tx_dropped++;
1465 c->stats.tx_fifo_errors++;
1466 }
1467 release_dma_lock(flags);
1468 }
1469 c->txcount=0;
1470 }
1471 else
1472 {
1473 c->txcount=c->tx_skb->len;
1474
1475
1476 if(c->dma_tx)
1477 {
1478 /*
1479 * FIXME: DMA is broken for the original 8530;
1480 * on the older parts we need to set a flag and
1481 * wait for a further TX interrupt to fire this
1482 * stage off.
1483 */
1484
1485 flags=claim_dma_lock();
1486 disable_dma(c->txdma);
1487
1488 /*
1489 * These two are needed by the 8530/85C30
1490 * and must be issued when idling.
1491 */
1492
1493 if(c->dev->type!=Z85230)
1494 {
1495 write_zsctrl(c, RES_Tx_CRC);
1496 write_zsctrl(c, RES_EOM_L);
1497 }
1498 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1499 clear_dma_ff(c->txdma);
1500 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1501 set_dma_count(c->txdma, c->txcount);
1502 enable_dma(c->txdma);
1503 release_dma_lock(flags);
1504 write_zsctrl(c, RES_EOM_L);
1505 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1506 }
1507 else
1508 {
1509
1510 /* ABUNDER off */
1511 write_zsreg(c, R10, c->regs[10]);
1512 write_zsctrl(c, RES_Tx_CRC);
1513
1514 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1515 {
1516 write_zsreg(c, R8, *c->tx_ptr++);
1517 c->txcount--;
1518 }
1519
1520 }
1521 }
1522 /*
1523 * Since we emptied tx_skb we can ask for more
1524 */
1525 netif_wake_queue(c->netdevice);
1526}
1527
1528/**
1529 * z8530_tx_done - TX complete callback
1530 * @c: The channel that completed a transmit.
1531 *
1532 * This is called when we complete a packet send. We wake the queue,
1533 * start the next packet going and then free the buffer of the existing
1534 * packet. This code is fairly timing sensitive.
1535 *
1536 * Called with the register lock held.
1537 */
1538
1539static void z8530_tx_done(struct z8530_channel *c)
1540{
1541 struct sk_buff *skb;
1542
1543 /* Actually this can happen. */
1544 if(c->tx_skb==NULL)
1545 return;
1546
1547 skb=c->tx_skb;
1548 c->tx_skb=NULL;
1549 z8530_tx_begin(c);
1550 c->stats.tx_packets++;
1551 c->stats.tx_bytes+=skb->len;
1552 dev_kfree_skb_irq(skb);
1553}
1554
1555/**
1556 * z8530_null_rx - Discard a packet
1557 * @c: The channel the packet arrived on
1558 * @skb: The buffer
1559 *
1560 * We point the receive handler at this function when idle. Instead
1561 * of syncppp processing the frames we get to throw them away.
1562 */
1563
1564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1565{
1566 dev_kfree_skb_any(skb);
1567}
1568
1569EXPORT_SYMBOL(z8530_null_rx);
1570
1571/**
1572 * z8530_rx_done - Receive completion callback
1573 * @c: The channel that completed a receive
1574 *
1575 * A new packet is complete. Our goal here is to get back into receive
1576 * mode as fast as possible. On the Z85230 we could change to using
1577 * ESCC mode, but on the older chips we have no choice. We flip to the
1578 * new buffer immediately in DMA mode so that the DMA of the next
1579 * frame can occur while we are copying the previous buffer to an sk_buff.
1580 *
1581 * Called with the lock held
1582 */
1583
1584static void z8530_rx_done(struct z8530_channel *c)
1585{
1586 struct sk_buff *skb;
1587 int ct;
1588
1589 /*
1590 * Is our receive engine in DMA mode
1591 */
1592
1593 if(c->rxdma_on)
1594 {
1595 /*
1596 * Save the ready state and the buffer currently
1597 * being used as the DMA target
1598 */
1599
1600 int ready=c->dma_ready;
1601 unsigned char *rxb=c->rx_buf[c->dma_num];
1602 unsigned long flags;
1603
1604 /*
1605 * Complete this DMA. Necessary to find the length.
1606 */
1607
1608 flags=claim_dma_lock();
1609
1610 disable_dma(c->rxdma);
1611 clear_dma_ff(c->rxdma);
1612 c->rxdma_on=0;
1613 ct=c->mtu-get_dma_residue(c->rxdma);
1614 if(ct<0)
1615 ct=2; /* Shit happens.. */
1616 c->dma_ready=0;
1617
1618 /*
1619 * Normal case: the other slot is free, start the next DMA
1620 * into it immediately.
1621 */
1622
1623 if(ready)
1624 {
1625 c->dma_num^=1;
1626 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1627 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1628 set_dma_count(c->rxdma, c->mtu);
1629 c->rxdma_on = 1;
1630 enable_dma(c->rxdma);
1631 /* Stop any frames that we missed the head of
1632 from passing */
1633 write_zsreg(c, R0, RES_Rx_CRC);
1634 }
1635 else
1636 /* Can't occur as we don't re-enable the DMA irq until
1637 after the flip is done */
1638 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);
1639
1640 release_dma_lock(flags);
1641
1642 /*
1643 * Shove the old buffer into an sk_buff. We can't DMA
1644 * directly into one on a PC - it might be above the 16Mb
1645 * boundary. Optimisation - we could check to see if we
1646 * can avoid the copy. Optimisation 2 - make the memcpy
1647 * a copychecksum.
1648 */
1649
1650 skb=dev_alloc_skb(ct);
1651 if(skb==NULL)
1652 {
1653 c->stats.rx_dropped++;
1654 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
1655 }
1656 else
1657 {
1658 skb_put(skb, ct);
1659 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct;
1662 }
1663 c->dma_ready=1;
1664 }
1665 else
1666 {
1667 RT_LOCK;
1668 skb=c->skb;
1669
1670 /*
1671 * The game we play for non DMA is similar. We want to
1672 * get the controller set up for the next packet as fast
1673 * as possible. We potentially only have one byte + the
1674 * fifo length for this. Thus we want to flip to the new
1675 * buffer and then mess around copying and allocating
1676 * things. For the current case it doesn't matter but
1677 * if you build a system where the sync irq isn't blocked
1678 * by the kernel IRQ disable then you need only block the
1679 * sync IRQ for the RT_LOCK area.
1680 *
1681 */
1682 ct=c->count;
1683
1684 c->skb = c->skb2;
1685 c->count = 0;
1686 c->max = c->mtu;
1687 if(c->skb)
1688 {
1689 c->dptr = c->skb->data;
1690 c->max = c->mtu;
1691 }
1692 else
1693 {
1694 c->count= 0;
1695 c->max = 0;
1696 }
1697 RT_UNLOCK;
1698
1699 c->skb2 = dev_alloc_skb(c->mtu);
1700 if(c->skb2==NULL)
1701 printk(KERN_WARNING "%s: memory squeeze.\n",
1702 c->netdevice->name);
1703 else
1704 {
1705 skb_put(c->skb2,c->mtu);
1706 }
1707 c->stats.rx_packets++;
1708 c->stats.rx_bytes+=ct;
1709
1710 }
1711 /*
1712 * If we received a frame we must now process it.
1713 */
1714 if(skb)
1715 {
1716 skb_trim(skb, ct);
1717 c->rx_function(c,skb);
1718 }
1719 else
1720 {
1721 c->stats.rx_dropped++;
1722 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1723 }
1724}
1725
1726/**
1727 * spans_boundary - Check a packet can be ISA DMA'd
1728 * @skb: The buffer to check
1729 *
1730 * Returns true if the buffer crosses a DMA boundary on a PC. The poor
1731 * thing can only DMA within a 64K block, not across the edges of it.
1732 */
1733
1734static inline int spans_boundary(struct sk_buff *skb)
1735{
1736 unsigned long a=(unsigned long)skb->data;
1737 a^=(a+skb->len);
1738 if(a&0x00010000) /* If the 64K bit is different.. */
1739 return 1;
1740 return 0;
1741}
1742
1743/**
1744 * z8530_queue_xmit - Queue a packet
1745 * @c: The channel to use
1746 * @skb: The packet to kick down the channel
1747 *
1748 * Queue a packet for transmission. Because the per-packet
1749 * interrupt latency requirements of the Z85230 are rather hard
1750 * to meet even in DMA mode, we do the flip to the DMA buffer
1751 * here if needed, not in the IRQ.
1752 *
1753 * Called from the network code. The lock is not held at this
1754 * point.
1755 */
1756
1757int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1758{
1759 unsigned long flags;
1760
1761 netif_stop_queue(c->netdevice);
1762 if(c->tx_next_skb)
1763 {
1764 return 1;
1765 }
1766
1767 /* PC SPECIFIC - DMA limits */
1768
1769 /*
1770 * If we will DMA the transmit and it has gone over the ISA bus
1771 * limit, then copy to the flip buffer.
1772 */
1773
1774 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1775 {
1776 /*
1777 * Send the flip buffer, and flip the flippy bit.
1778 * We don't care which is used when just so long as
1779 * we never use the same buffer twice in a row. Since
1780 * only one buffer can be going out at a time the other
1781 * has to be safe.
1782 */
1783 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1784 c->tx_dma_used^=1; /* Flip temp buffer */
1785 skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1786 }
1787 else
1788 c->tx_next_ptr=skb->data;
1789 RT_LOCK;
1790 c->tx_next_skb=skb;
1791 RT_UNLOCK;
1792
1793 spin_lock_irqsave(c->lock, flags);
1794 z8530_tx_begin(c);
1795 spin_unlock_irqrestore(c->lock, flags);
1796
1797 return 0;
1798}
1799
1800EXPORT_SYMBOL(z8530_queue_xmit);
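
/*
 * Transmit hook sketch, using the hard_start_xmit style of this kernel
 * generation; my_channel() again stands in for the board specific
 * lookup.
 *
 *	static int my_xmit(struct sk_buff *skb, struct net_device *d)
 *	{
 *		return z8530_queue_xmit(my_channel(d), skb);
 *	}
 */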
1801
1802/**
1803 * z8530_get_stats - Get network statistics
1804 * @c: The channel to use
1805 *
1806 * Get the statistics block. We keep the statistics in software as
1807 * the chip doesn't do it for us.
1808 *
1809 * Locking is ignored here - we could lock for a copy but it's
1810 * not likely to be that big an issue.
1811 */
1812
1813struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1814{
1815 return &c->stats;
1816}
1817
1818EXPORT_SYMBOL(z8530_get_stats);
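
/*
 * The matching get_stats hook of a board driver is equally thin
 * (my_channel() is the hypothetical lookup used in the earlier
 * sketches):
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *d)
 *	{
 *		return z8530_get_stats(my_channel(d));
 *	}
 */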
1819
1820/*
1821 * Module support
1822 */
1823static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1824
1825static int __init z85230_init_driver(void)
1826{
1827 printk(banner);
1828 return 0;
1829}
1830module_init(z85230_init_driver);
1831
1832static void __exit z85230_cleanup_driver(void)
1833{
1834}
1835module_exit(z85230_cleanup_driver);
1836
1837MODULE_AUTHOR("Red Hat Inc.");
1838MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1839MODULE_LICENSE("GPL");