f78bc5d40708ac3b8572c5b95c891e9c4caa4af6
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / tokenring / olympic.c
1 /*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
7 *
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
10 *
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
13 *
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 * assistance and perseverance with the testing of this driver.
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 *
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
22 *
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
69 *
70 * Wake on lan
71 *
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
76 */
77
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80 #define OLYMPIC_DEBUG 0
81
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/errno.h>
86 #include <linux/timer.h>
87 #include <linux/in.h>
88 #include <linux/ioport.h>
89 #include <linux/string.h>
90 #include <linux/proc_fs.h>
91 #include <linux/ptrace.h>
92 #include <linux/skbuff.h>
93 #include <linux/interrupt.h>
94 #include <linux/delay.h>
95 #include <linux/netdevice.h>
96 #include <linux/trdevice.h>
97 #include <linux/stddef.h>
98 #include <linux/init.h>
99 #include <linux/pci.h>
100 #include <linux/spinlock.h>
101 #include <linux/bitops.h>
102 #include <linux/jiffies.h>
103
104 #include <net/checksum.h>
105 #include <net/net_namespace.h>
106
107 #include <asm/io.h>
108 #include <asm/system.h>
109
110 #include "olympic.h"
111
112 /* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 *
117 * Official releases will only have an a.b.c version number format.
118 */
119
static char version[] =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;

/* Human-readable decode tables for the adapter OPEN error byte:
 * high nibble indexes open_maj_error (the open phase that failed),
 * low nibble indexes open_min_error (the reason). See olympic_open().
 */
static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
				 "Address Verification", "Neighbor Notification (Ring Poll)",
				 "Request Parameters","FDX Registration Request",
				 "FDX Duplicate Address Check", "Station registration Query Wait",
				 "Unknown stage"};

static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
				 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
				 "Duplicate Node Address","Request Parameters","Remove Received",
				 "Reserved", "Reserved", "No Monitor Detected for RPL",
				 "Monitor Contention failure for RPL", "FDX Protocol Error"};
134
/* Module parameters */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mbps is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

/* One entry per adapter; indexed by probe order (card_no in olympic_probe). */
static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size, per adapter. Values outside 100..18000 fall back to
 * PKT_BUF_SZ in olympic_probe(). */
static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;

/* Message Level: non-zero enables the driver's informational printk()s. */
static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);

/* PCI IDs this driver binds to: only the IBM PCI token-ring chipset. */
static struct pci_device_id olympic_pci_tbl[] = {
	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
	{ } 	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
180
181
/* Forward declarations: net_device entry points, interrupt handler and
 * the SRB/ARB/ASB bottom-half helpers defined later in this file. */
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static struct net_device_stats * olympic_get_stats(struct net_device *dev);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197
198 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199 {
200 struct net_device *dev ;
201 struct olympic_private *olympic_priv;
202 static int card_no = -1 ;
203 int i ;
204
205 card_no++ ;
206
207 if ((i = pci_enable_device(pdev))) {
208 return i ;
209 }
210
211 pci_set_master(pdev);
212
213 if ((i = pci_request_regions(pdev,"olympic"))) {
214 goto op_disable_dev;
215 }
216
217 dev = alloc_trdev(sizeof(struct olympic_private)) ;
218 if (!dev) {
219 i = -ENOMEM;
220 goto op_release_dev;
221 }
222
223 olympic_priv = netdev_priv(dev) ;
224
225 spin_lock_init(&olympic_priv->olympic_lock) ;
226
227 init_waitqueue_head(&olympic_priv->srb_wait);
228 init_waitqueue_head(&olympic_priv->trb_wait);
229 #if OLYMPIC_DEBUG
230 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
231 #endif
232 dev->irq=pdev->irq;
233 dev->base_addr=pci_resource_start(pdev, 0);
234 olympic_priv->olympic_card_name = pci_name(pdev);
235 olympic_priv->pdev = pdev;
236 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
237 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
238 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
239 goto op_free_iomap;
240 }
241
242 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
243 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
244 else
245 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
246
247 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
248 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
249 olympic_priv->olympic_message_level = message_level[card_no] ;
250 olympic_priv->olympic_network_monitor = network_monitor[card_no];
251
252 if ((i = olympic_init(dev))) {
253 goto op_free_iomap;
254 }
255
256 dev->open=&olympic_open;
257 dev->hard_start_xmit=&olympic_xmit;
258 dev->change_mtu=&olympic_change_mtu;
259 dev->stop=&olympic_close;
260 dev->do_ioctl=NULL;
261 dev->set_multicast_list=&olympic_set_rx_mode;
262 dev->get_stats=&olympic_get_stats ;
263 dev->set_mac_address=&olympic_set_mac_address ;
264 SET_NETDEV_DEV(dev, &pdev->dev);
265
266 pci_set_drvdata(pdev,dev) ;
267 register_netdev(dev) ;
268 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
269 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
270 char proc_name[20] ;
271 strcpy(proc_name,"olympic_") ;
272 strcat(proc_name,dev->name) ;
273 create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
274 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
275 }
276 return 0 ;
277
278 op_free_iomap:
279 if (olympic_priv->olympic_mmio)
280 iounmap(olympic_priv->olympic_mmio);
281 if (olympic_priv->olympic_lap)
282 iounmap(olympic_priv->olympic_lap);
283
284 free_netdev(dev);
285 op_release_dev:
286 pci_release_regions(pdev);
287
288 op_disable_dev:
289 pci_disable_device(pdev);
290 return i;
291 }
292
293 static int olympic_init(struct net_device *dev)
294 {
295 struct olympic_private *olympic_priv;
296 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
297 unsigned long t;
298 unsigned int uaa_addr;
299
300 olympic_priv=netdev_priv(dev);
301 olympic_mmio=olympic_priv->olympic_mmio;
302
303 printk("%s \n", version);
304 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
305
306 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
307 t=jiffies;
308 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
309 schedule();
310 if(time_after(jiffies, t + 40*HZ)) {
311 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
312 return -ENODEV;
313 }
314 }
315
316
317 /* Needed for cardbus */
318 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
319 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
320 }
321
322 #if OLYMPIC_DEBUG
323 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
324 printk("GPR: %x\n",readw(olympic_mmio+GPR));
325 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
326 #endif
327 /* Aaaahhh, You have got to be real careful setting GPR, the card
328 holds the previous values from flash memory, including autosense
329 and ring speed */
330
331 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
332
333 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
334 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
335 if (olympic_priv->olympic_message_level)
336 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
337 } else if (olympic_priv->olympic_ring_speed == 16) {
338 if (olympic_priv->olympic_message_level)
339 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
340 writew(GPR_16MBPS, olympic_mmio+GPR);
341 } else if (olympic_priv->olympic_ring_speed == 4) {
342 if (olympic_priv->olympic_message_level)
343 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
344 writew(0, olympic_mmio+GPR);
345 }
346
347 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
348
349 #if OLYMPIC_DEBUG
350 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
351 #endif
352 /* Solo has been paused to meet the Cardbus power
353 * specs if the adapter is cardbus. Check to
354 * see its been paused and then restart solo. The
355 * adapter should set the pause bit within 1 second.
356 */
357
358 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
359 t=jiffies;
360 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
361 schedule() ;
362 if(time_after(jiffies, t + 2*HZ)) {
363 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
364 return -ENODEV;
365 }
366 }
367 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
368 }
369
370 /* start solo init */
371 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
372
373 t=jiffies;
374 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
375 schedule();
376 if(time_after(jiffies, t + 15*HZ)) {
377 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
378 return -ENODEV;
379 }
380 }
381
382 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
383
384 #if OLYMPIC_DEBUG
385 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
386 #endif
387
388 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
389
390 #if OLYMPIC_DEBUG
391 {
392 int i;
393 printk("init_srb(%p): ",init_srb);
394 for(i=0;i<20;i++)
395 printk("%x ",readb(init_srb+i));
396 printk("\n");
397 }
398 #endif
399 if(readw(init_srb+6)) {
400 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
401 return -ENODEV;
402 }
403
404 if (olympic_priv->olympic_message_level) {
405 if ( readb(init_srb +2) & 0x40) {
406 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
407 } else {
408 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
409 }
410 }
411
412 uaa_addr=swab16(readw(init_srb+8));
413
414 #if OLYMPIC_DEBUG
415 printk("UAA resides at %x\n",uaa_addr);
416 #endif
417
418 writel(uaa_addr,olympic_mmio+LAPA);
419 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
420
421 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
422
423 #if OLYMPIC_DEBUG
424 printk("adapter address: %pM\n", dev->dev_addr);
425 #endif
426
427 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
428 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
429
430 return 0;
431
432 }
433
/*
 * olympic_open - net_device open entry point.
 *
 * Re-runs olympic_init, claims the IRQ, issues the SRB_OPEN_ADAPTER
 * command (retrying once at a different speed when autosensing), then
 * builds the RX and TX descriptor rings and unmasks the data-path
 * interrupts. Sleeps on srb_wait for the SRB reply, so process context
 * only. Returns 0 on success, -EAGAIN if the IRQ is unavailable, -EIO
 * on open failure (IRQ released via the out: path).
 */
static int olympic_open(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
	unsigned long flags, t;
	int i, open_finished = 1 ;
	u8 resp, err;

	DECLARE_WAITQUEUE(wait,current) ;

	olympic_init(dev);

	if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
		return -EAGAIN;
	}

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif

	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */

	/* adapter is closed, so SRB is pointed to by LAPWWO */

	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
	printk("Before the open command \n");
#endif
	/* Loop re-issues the open at a different ring speed when autosense
	 * hits a speed mismatch (resp 0x07) on the first attempt. */
	do {
		memset_io(init_srb,0,SRB_COMMAND_SIZE);

		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);

		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		if (olympic_priv->olympic_network_monitor)
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
		else
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

		/* Test OR of first 3 bytes as its totally possible for
		 * someone to set the first 2 bytes to be zero, although this
		 * is an error, the first byte must have bit 6 set to 1  */

		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
			writeb(olympic_priv->olympic_laa[0],init_srb+12);
			writeb(olympic_priv->olympic_laa[1],init_srb+13);
			writeb(olympic_priv->olympic_laa[2],init_srb+14);
			writeb(olympic_priv->olympic_laa[3],init_srb+15);
			writeb(olympic_priv->olympic_laa[4],init_srb+16);
			writeb(olympic_priv->olympic_laa[5],init_srb+17);
			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
		}
		writeb(1,init_srb+30);

		/* Queue the SRB command under the lock, then sleep until the
		 * interrupt handler clears srb_queued (10s timeout,
		 * interruptible by signals). */
		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
		olympic_priv->srb_queued=1;

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

		t = jiffies ;

		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_INTERRUPTIBLE) ;

		while(olympic_priv->srb_queued) {
			schedule() ;
			if(signal_pending(current))	{
				printk(KERN_WARNING "%s: Signal received in open.\n",
					dev->name);
				printk(KERN_WARNING "SISR=%x LISR=%x\n",
					readl(olympic_mmio+SISR),
					readl(olympic_mmio+LISR));
				olympic_priv->srb_queued=0;
				break;
			}
			if (time_after(jiffies, t + 10*HZ)) {
				printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
				olympic_priv->srb_queued=0;
				break ;
			}
			set_current_state(TASK_INTERRUPTIBLE) ;
		}
		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_RUNNING) ;
		olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
		printk("init_srb(%p): ",init_srb);
		for(i=0;i<20;i++)
			printk("%02x ",readb(init_srb+i));
		printk("\n");
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */

		switch (resp = readb(init_srb+2)) {
		case OLYMPIC_CLEAR_RET_CODE:
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
			goto out;
		case 0:
			open_finished = 1;
			break;
		case 0x07:
			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
				printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
				open_finished = 0 ;
				continue;
			}

			err = readb(init_srb+7);

			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
				printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
			} else {
				/* Decode the error byte via the open_*_error tables. */
				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
					open_maj_error[(err & 0xf0) >> 4],
					open_min_error[(err & 0x0f)]);
			}
			goto out;

		case 0x32:
			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
			       dev->name, olympic_priv->olympic_laa);
			goto out;

		default:
			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
			goto out;

		}
	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */

	/* Byte 18 of the reply reports the negotiated mode: bit 3 = FDX,
	 * bit 1 = 100 Mbps, bit 0 = 16 Mbps, neither = 4 Mbps. */
	if (readb(init_srb+18) & (1<<3))
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);

	if (readb(init_srb+18) & (1<<1))
		olympic_priv->olympic_ring_speed = 100 ;
	else if (readb(init_srb+18) & 1)
		olympic_priv->olympic_ring_speed = 16 ;
	else
		olympic_priv->olympic_ring_speed = 4 ;

	if (olympic_priv->olympic_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);

	/* Cache the big-endian ASB/SRB/ARB/TRB offsets from the reply. */
	olympic_priv->asb = swab16(readw(init_srb+8));
	olympic_priv->srb = swab16(readw(init_srb+10));
	olympic_priv->arb = swab16(readw(init_srb+12));
	olympic_priv->trb = swab16(readw(init_srb+16));

	olympic_priv->olympic_receive_options = 0x01 ;
	olympic_priv->olympic_copy_all_options = 0 ;

	/* setup rx ring */

	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */

	/* Allocate and DMA-map one skb per RX descriptor; descriptors hold
	 * little-endian addresses/lengths for the hardware. */
	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {

		struct sk_buff *skb;

		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
		if(skb == NULL)
			break;

		skb->dev = dev;

		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
					  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
		olympic_priv->rx_ring_skb[i]=skb;
	}

	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		goto out;
	}

	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
	writew(i, olympic_mmio+RXDESCQCNT);

	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

	writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );

	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif

	/* Toggle the RXENQ ownership bit while handing the card i buffers. */
	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif

	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);

	/* setup tx ring */

	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
		olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);

	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);

	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */

	/* Clear and unmask the error interrupts, then the data-path ones. */
	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
	writel(0,olympic_mmio+EISR) ;
	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat;
		u8 __iomem *opt;
		int i;
		u8 addr[6];
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);

		for (i = 0; i < 6; i++)
			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
		printk("%s: Node Address: %pM\n", dev->name, addr);
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

		for (i = 0; i < 6; i++)
			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
		printk("%s: NAUN Address: %pM\n", dev->name, addr);
	}

	netif_start_queue(dev);
	return 0;

out:
	free_irq(dev->irq, dev);
	return -EIO;
}
727
728 /*
729 * When we enter the rx routine we do not know how many frames have been
730 * queued on the rx channel. Therefore we start at the next rx status
731 * position and travel around the receive ring until we have completed
732 * all the frames.
733 *
734 * This means that we may process the frame before we receive the end
735 * of frame interrupt. This is why we always test the status instead
736 * of blindly processing the next frame.
737 *
738 * We also remove the last 4 bytes from the packet as well, these are
739 * just token ring trailer info and upset protocols that don't check
740 * their own length, i.e. SNA.
741 *
742 */
743 static void olympic_rx(struct net_device *dev)
744 {
745 struct olympic_private *olympic_priv=netdev_priv(dev);
746 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
747 struct olympic_rx_status *rx_status;
748 struct olympic_rx_desc *rx_desc ;
749 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
750 struct sk_buff *skb, *skb2;
751 int i;
752
753 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
754
755 while (rx_status->status_buffercnt) {
756 u32 l_status_buffercnt;
757
758 olympic_priv->rx_status_last_received++ ;
759 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
760 #if OLYMPIC_DEBUG
761 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
762 #endif
763 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
764 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
765 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
766 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
767
768 #if OLYMPIC_DEBUG
769 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
770 #endif
771 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
772 if(l_status_buffercnt & 0xC0000000) {
773 if (l_status_buffercnt & 0x3B000000) {
774 if (olympic_priv->olympic_message_level) {
775 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
776 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
777 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
778 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
779 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
780 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
781 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
782 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
783 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
784 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
785 }
786 olympic_priv->rx_ring_last_received += i ;
787 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
788 olympic_priv->olympic_stats.rx_errors++;
789 } else {
790
791 if (buffer_cnt == 1) {
792 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
793 } else {
794 skb = dev_alloc_skb(length) ;
795 }
796
797 if (skb == NULL) {
798 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
799 olympic_priv->olympic_stats.rx_dropped++ ;
800 /* Update counters even though we don't transfer the frame */
801 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
803 } else {
804 /* Optimise based upon number of buffers used.
805 If only one buffer is used we can simply swap the buffers around.
806 If more than one then we must use the new buffer and copy the information
807 first. Ideally all frames would be in a single buffer, this can be tuned by
808 altering the buffer size. If the length of the packet is less than
809 1500 bytes we're going to copy it over anyway to stop packets getting
810 dropped from sockets with buffers smaller than our pkt_buf_sz. */
811
812 if (buffer_cnt==1) {
813 olympic_priv->rx_ring_last_received++ ;
814 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
815 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
816 if (length > 1500) {
817 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
818 /* unmap buffer */
819 pci_unmap_single(olympic_priv->pdev,
820 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
821 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
822 skb_put(skb2,length-4);
823 skb2->protocol = tr_type_trans(skb2,dev);
824 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
825 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
826 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
827 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
828 cpu_to_le32(olympic_priv->pkt_buf_sz);
829 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
830 netif_rx(skb2) ;
831 } else {
832 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
833 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
834 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
835 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
836 skb_put(skb,length - 4),
837 length - 4);
838 pci_dma_sync_single_for_device(olympic_priv->pdev,
839 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
840 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
841 skb->protocol = tr_type_trans(skb,dev) ;
842 netif_rx(skb) ;
843 }
844 } else {
845 do { /* Walk the buffers */
846 olympic_priv->rx_ring_last_received++ ;
847 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
848 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
849 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
850 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
851 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
852 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
853 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
854 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
855 skb_put(skb, cpy_length),
856 cpy_length);
857 pci_dma_sync_single_for_device(olympic_priv->pdev,
858 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
859 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
860 } while (--i) ;
861 skb_trim(skb,skb->len-4) ;
862 skb->protocol = tr_type_trans(skb,dev);
863 netif_rx(skb) ;
864 }
865 dev->last_rx = jiffies ;
866 olympic_priv->olympic_stats.rx_packets++ ;
867 olympic_priv->olympic_stats.rx_bytes += length ;
868 } /* if skb == null */
869 } /* If status & 0x3b */
870
871 } else { /*if buffercnt & 0xC */
872 olympic_priv->rx_ring_last_received += i ;
873 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
874 }
875
876 rx_status->fragmentcnt_framelen = 0 ;
877 rx_status->status_buffercnt = 0 ;
878 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
879
880 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
881 } /* while */
882
883 }
884
/*
 * olympic_freemem - release all receive skbs and DMA mappings.
 *
 * Called from olympic_close() after the adapter has been told to close,
 * so the hardware should no longer be writing into these buffers.
 * Walks the rx ring starting from rx_status_last_received, freeing each
 * queued skb and unmapping its buffer, then unmaps the four ring
 * structures themselves.
 */
static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	int i;

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			/* dev_kfree_skb_irq: this may run with interrupts off */
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		/* 0xdeadbeef marks a descriptor whose buffer is not currently
		   DMA-mapped (same sentinel is used on the tx side); skip those. */
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
			pci_unmap_single(olympic_priv->pdev,
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		/* Ring size is a power of two, so masking wraps the index. */
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return ;
}
916
/*
 * olympic_interrupt - main interrupt service routine.
 *
 * Decodes the SISR (system interrupt status register) and dispatches:
 * SRB replies, tx completions, rx status, adapter checks, ASB/ARB/TRB
 * events and error conditions.  Returns IRQ_NONE if the interrupt was
 * not raised by this adapter (shared IRQ line, or hotswap removal).
 */
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			/* Release anybody blocked on an SRB so close/open can proceed */
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1: a task is sleeping on srb_wait (e.g. close);
			   srb_queued==2: no sleeper, decode the reply in the bh. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				olympic_priv->olympic_stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* Mark the descriptor unmapped (see olympic_freemem) and
				   clear the status so the loop terminates. */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			/* Adapter is dead; do not re-enable the interrupt mask below. */
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
			   /var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */

	/* Re-arm the master interrupt */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	return IRQ_HANDLED;
}
1033
/*
 * olympic_xmit - queue a frame for transmission.
 *
 * Maps the skb for DMA, fills the next free tx descriptor and kicks the
 * adapter via TXENQ_1.  Returns 0 when the frame was queued; returns 1
 * (tx busy, frame must be requeued by the stack) when the tx ring is
 * full.  The queue is stopped first and only re-woken when a descriptor
 * was actually consumed, so a full ring leaves the queue stopped until
 * the tx-completion interrupt frees entries.
 */
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	unsigned long flags ;

	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);

	netif_stop_queue(dev);

	if(olympic_priv->free_tx_ring_entries) {
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
		/* 0x80000000 is the "valid" bit the adapter looks for. */
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
		olympic_priv->free_tx_ring_entries--;

		olympic_priv->tx_ring_free++;
		olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
		/* Toggle the enqueue bit and tell the adapter one frame was added. */
		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
		netif_wake_queue(dev);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 0;
	} else {
		/* Ring full: leave the queue stopped and ask the stack to retry. */
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 1;
	}

}
1063
1064
/*
 * olympic_close - shut the adapter down.
 *
 * Issues SRB_CLOSE_ADAPTER through the shared request block and sleeps
 * (interruptibly, up to 60s) until the SRB-reply interrupt clears
 * srb_queued.  Afterwards frees all rx/tx resources, resets the fifo
 * and busmaster logic, and releases the IRQ.
 */
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	/* Point the lap window at the SRB and build the close command. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	/* Register on the wait queue *before* issuing the command so the
	   interrupt's wake_up cannot be missed. */
	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;	/* 1 => interrupt handler will wake us */

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	/* NOTE(review): srb_queued is unconditionally cleared at the bottom,
	   so this loop runs at most once; the timeout is best-effort. */
	while(olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

		if(signal_pending(current)) {
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
			olympic_priv->srb_queued=0;
			break;
		}

		if (t == 0) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
		}
		olympic_priv->srb_queued=0;
	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}
1134
/*
 * olympic_set_rx_mode - apply promiscuous mode and multicast filtering.
 *
 * If the promiscuous setting changed, an SRB_MODIFY_RECEIVE_OPTIONS
 * command is issued and the function returns immediately (the functional
 * address update is then deferred until the next invocation).  Otherwise
 * the multicast list is OR-folded into a 4-byte token-ring functional
 * address and written with SRB_SET_FUNC_ADDRESS.  Both commands complete
 * asynchronously via olympic_srb_bh (srb_queued == 2).
 */
static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 options = 0;
	u8 __iomem *srb;
	struct dev_mc_list *dmi ;
	unsigned char dev_mc_address[4] ;
	int i ;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	/* 0x61: copy-all bits in the adapter's receive options. */
	if (dev->flags&IFF_PROMISC)
		options |= 0x61 ;
	else
		options &= ~0x61 ;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
		writeb(0,srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
		writeb(0,srb+3);
		writeb(olympic_priv->olympic_receive_options,srb+4);
		writeb(options,srb+5);

		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options ;

		/* Only one SRB can be outstanding; skip the functional
		   address update this time round. */
		return ;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	/* OR together bytes 2-5 of every multicast MAC address. */
	for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
		dev_mc_address[0] |= dmi->dmi_addr[2] ;
		dev_mc_address[1] |= dmi->dmi_addr[3] ;
		dev_mc_address[2] |= dmi->dmi_addr[4] ;
		dev_mc_address[3] |= dmi->dmi_addr[5] ;
	}

	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	writeb(0,srb+3);
	writeb(0,srb+4);
	writeb(0,srb+5);
	writeb(dev_mc_address[0],srb+6);
	writeb(dev_mc_address[1],srb+7);
	writeb(dev_mc_address[2],srb+8);
	writeb(dev_mc_address[3],srb+9);

	olympic_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}
1202
/*
 * olympic_srb_bh - decode an asynchronous SRB reply.
 *
 * Called from the interrupt handler when srb_queued == 2 (a command was
 * issued with nobody sleeping on the reply).  Reads the command byte at
 * srb+0 and the return code at srb+2 and logs the outcome.  Purely
 * diagnostic: no driver state is changed here.
 */
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found \n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				/* NOTE(review): no default case here, unlike the other
				   inner switches; unknown codes are silently ignored. */
			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}
1347
1348 static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1349 {
1350 struct olympic_private *olympic_priv ;
1351 olympic_priv=netdev_priv(dev);
1352 return (struct net_device_stats *) &olympic_priv->olympic_stats;
1353 }
1354
1355 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1356 {
1357 struct sockaddr *saddr = addr ;
1358 struct olympic_private *olympic_priv = netdev_priv(dev);
1359
1360 if (netif_running(dev)) {
1361 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1362 return -EIO ;
1363 }
1364
1365 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1366
1367 if (olympic_priv->olympic_message_level) {
1368 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1369 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1370 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1371 olympic_priv->olympic_laa[5]);
1372 }
1373
1374 return 0 ;
1375 }
1376
/*
 * olympic_arb_cmd - service an adapter-to-host ARB request.
 *
 * Two ARB commands are handled:
 *  - ARB_RECEIVE_DATA: a MAC frame has arrived in adapter memory; copy
 *    it out of the chained receive buffers into an skb, hand it to the
 *    stack, then acknowledge via the ASB (or defer to olympic_asb_bh if
 *    the ASB is busy).
 *  - ARB_LAN_CHANGE_STATUS: the ring status changed; log the transitions
 *    and, on fatal errors, reset the fifo/busmaster logic and stop the
 *    queue.  Counter overflows trigger READ.LOG / READ.SR.COUNTERS SRBs.
 */
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 __iomem *buf_ptr ;
	u8 __iomem *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		/* Adapter fields are big-endian; swab16 after the readw gives
		   the right value on both PPC and x86 (see file header notes). */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
		{
		int i;
		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

		for (i=0 ;  i < 14 ; i++) {
			printk("Loc %d = %02x\n",i,readb(frame_data + i));
		}

		printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
		}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			/* Still must acknowledge the ARB/ASB below. */
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
			       dev->name, mac_hdr->daddr);
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
			       dev->name, mac_hdr->saddr);
		}
		netif_rx(mac_frame);
		dev->last_rx = jiffies;

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: ask for an ASB-free interrupt and let
			   olympic_asb_bh send the acknowledgement. */
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;	/* 2 => reply sent, bh checks the return code */

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		/* Only report bits that changed since the last event. */
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing \n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow \n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	}  /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
}
1564
/*
 * olympic_asb_bh - deferred ASB (adapter status block) handling.
 *
 * State machine driven by asb_queued:
 *  1 - olympic_arb_cmd found the ASB busy; now that it is free, send the
 *      receive-data acknowledgement and move to state 2.
 *  2 - the reply was sent earlier; check the adapter's return code and
 *      log anything unexpected, then reset to idle (0).
 */
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *arb_block, *asb_block ;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2 ;

		return ;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
			case 0x01:
				printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
				break ;
			case 0x26:
				printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
				break ;
			case 0xFF:
				/* Valid response, everything should be ok again */
				break ;
			default:
				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
				break ;
		}
	}
	olympic_priv->asb_queued = 0 ;
}
1604
1605 static int olympic_change_mtu(struct net_device *dev, int mtu)
1606 {
1607 struct olympic_private *olympic_priv = netdev_priv(dev);
1608 u16 max_mtu ;
1609
1610 if (olympic_priv->olympic_ring_speed == 4)
1611 max_mtu = 4500 ;
1612 else
1613 max_mtu = 18000 ;
1614
1615 if (mtu > max_mtu)
1616 return -EINVAL ;
1617 if (mtu < 100)
1618 return -EINVAL ;
1619
1620 dev->mtu = mtu ;
1621 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1622
1623 return 0 ;
1624 }
1625
/*
 * olympic_proc_info - /proc read handler for the network-monitor entry.
 *
 * Formats the adapter address table and the token ring parameters table
 * (read live from adapter memory via the lap window) into the caller's
 * buffer, then applies the classic proc_info offset/length windowing so
 * partial reads work.  All multi-byte adapter fields are big-endian and
 * are swab16()'d after readw().
 */
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	struct net_device *dev = (struct net_device *)data ;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	int size = 0 ;
	int len=0;
	off_t begin=0;
	off_t pos=0;
	u8 addr[6];
	u8 addr2[6];
	int i;

	size = sprintf(buffer,
		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	size += sprintf(buffer+size, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
	   dev->name);

	/* Copy io-mapped addresses into a local buffer so %pM can print them. */
	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);

	size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
	   dev->name,
	   dev->dev_addr, addr,
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0 ; i < 6 ; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x   : %pM : %pM : %04x   : %04x     :  %04x    :\n",
	  dev->name,
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
	  addr, addr2,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	size += sprintf(buffer+size, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	size += sprintf(buffer+size, "%6s: %pM : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
	  dev->name, addr,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	size += sprintf(buffer+size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	size += sprintf(buffer+size, "%6s:                :  %02x  :  %02x  : %pM : %02x:%02x:%02x:%02x    : \n",
	  dev->name,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
	  addr,
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	/* Standard proc_info windowing: clip the formatted text to the
	   caller's (offset, length) window. */
	len=size;
	pos=begin+size;
	if (pos<offset) {
		len=0;
		begin=pos;
	}
	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}
1718
1719 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1720 {
1721 struct net_device *dev = pci_get_drvdata(pdev) ;
1722 struct olympic_private *olympic_priv=netdev_priv(dev);
1723
1724 if (olympic_priv->olympic_network_monitor) {
1725 char proc_name[20] ;
1726 strcpy(proc_name,"olympic_") ;
1727 strcat(proc_name,dev->name) ;
1728 remove_proc_entry(proc_name,init_net.proc_net);
1729 }
1730 unregister_netdev(dev) ;
1731 iounmap(olympic_priv->olympic_mmio) ;
1732 iounmap(olympic_priv->olympic_lap) ;
1733 pci_release_regions(pdev) ;
1734 pci_set_drvdata(pdev,NULL) ;
1735 free_netdev(dev) ;
1736 }
1737
/* PCI driver glue: binds olympic_probe/olympic_remove_one to the IDs in
 * olympic_pci_tbl.  __devexit_p() compiles the remove hook out when
 * hotplug support is disabled. */
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};
1744
1745 static int __init olympic_pci_init(void)
1746 {
1747 return pci_register_driver(&olympic_driver) ;
1748 }
1749
1750 static void __exit olympic_pci_cleanup(void)
1751 {
1752 pci_unregister_driver(&olympic_driver) ;
1753 }
1754
1755
/* Module entry/exit points and license tag (GPL is required for the
 * kernel symbols this driver uses). */
module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;

MODULE_LICENSE("GPL");