netdev: convert bulk of drivers to netdev_tx_t
drivers/net/skfp/skfddi.c
1/*
2 * File Name:
3 * skfddi.c
4 *
5 * Copyright Information:
6 * Copyright SysKonnect 1998,1999.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 * Abstract:
16 * A Linux device driver supporting the SysKonnect FDDI PCI controller
17 * family.
18 *
19 * Maintainers:
20 * CG Christoph Goos (cgoos@syskonnect.de)
21 *
22 * Contributors:
23 * DM David S. Miller
24 *
25 * Address all questions to:
26 * linux@syskonnect.de
27 *
28 * The technical manual for the adapters is available from SysKonnect's
29 * web pages: www.syskonnect.com
30 * Go to "Support" and search the Knowledge Base for "manual".
31 *
32 * Driver Architecture:
33 * The driver architecture is based on the DEC FDDI driver by
34 * Lawrence V. Stefani and several ethernet drivers.
35 * I also used an existing Windows NT miniport driver.
36 * All hardware-dependent functions are handled by the SysKonnect
37 * Hardware Module.
38 * The only header files that are directly related to this source
39 * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
40 * The others belong to the SysKonnect FDDI Hardware Module and
41 * should not be changed.
42 *
43 * Modification History:
44 * Date Name Description
45 * 02-Mar-98 CG Created.
46 *
47 * 10-Mar-99 CG Support for 2.2.x added.
48 * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
49 * 26-Oct-99 CG Fixed compilation error on 2.2.13
50 * 12-Nov-99 CG Source code release
51 * 22-Nov-99 CG Included in kernel source.
52 * 07-May-00 DM 64 bit fixes, new dma interface
53 * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
54 * Daniele Bellucci <bellucda@tiscali.it>
55 * 03-Dec-03 SH Convert to PCI device model
56 *
57 * Compilation options (-Dxxx):
58 * DRIVERDEBUG print lots of messages to log file
59 * DUMPPACKETS print received/transmitted packets to logfile
60 *
61 * Tested cpu architectures:
62 * - i386
63 * - sparc64
64 */
65
66/* Version information string - should be updated prior to */
67/* each new release!!! */
68#define VERSION "2.07"
69
70static const char * const boot_msg =
71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
73
74/* Include files */
75
76#include <linux/module.h>
77#include <linux/kernel.h>
78#include <linux/errno.h>
79#include <linux/ioport.h>
80#include <linux/slab.h>
81#include <linux/interrupt.h>
82#include <linux/pci.h>
83#include <linux/netdevice.h>
84#include <linux/fddidevice.h>
85#include <linux/skbuff.h>
86#include <linux/bitops.h>
87
88#include <asm/byteorder.h>
89#include <asm/io.h>
90#include <asm/uaccess.h>
91
92#include "h/types.h"
93#undef ADDR // undo Linux definition
94#include "h/skfbi.h"
95#include "h/fddi.h"
96#include "h/smc.h"
97#include "h/smtstate.h"
98
99
100// Define module-wide (static) routines
101static int skfp_driver_init(struct net_device *dev);
102static int skfp_open(struct net_device *dev);
103static int skfp_close(struct net_device *dev);
104static irqreturn_t skfp_interrupt(int irq, void *dev_id);
105static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
106static void skfp_ctl_set_multicast_list(struct net_device *dev);
107static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
108static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
109static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
110static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
111 struct net_device *dev);
112static void send_queued_packets(struct s_smc *smc);
113static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
114static void ResetAdapter(struct s_smc *smc);
115
116
117// Functions needed by the hardware module
118void *mac_drv_get_space(struct s_smc *smc, u_int size);
119void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
120unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
121unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
122void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
123 int flag);
124void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
125void llc_restart_tx(struct s_smc *smc);
126void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
127 int frag_count, int len);
128void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
129 int frag_count);
130void mac_drv_fill_rxd(struct s_smc *smc);
131void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
132 int frag_count);
133int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
134 int la_len);
135void dump_data(unsigned char *Data, int length);
136
137// External functions from the hardware module
138extern u_int mac_drv_check_space(void);
139extern int mac_drv_init(struct s_smc *smc);
140extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
141 int len, int frame_status);
142extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
143 int frame_len, int frame_status);
144extern void fddi_isr(struct s_smc *smc);
145extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
146 int len, int frame_status);
147extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
148extern void mac_drv_clear_rx_queue(struct s_smc *smc);
149extern void enable_tx_irq(struct s_smc *smc, u_short queue);
150
151static struct pci_device_id skfddi_pci_tbl[] = {
152 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
153 { } /* Terminating entry */
154};
155MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
156MODULE_LICENSE("GPL");
157MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
158
159// Define module-wide (static) variables
160
161static int num_boards; /* total number of adapters configured */
162
163static const struct net_device_ops skfp_netdev_ops = {
164 .ndo_open = skfp_open,
165 .ndo_stop = skfp_close,
166 .ndo_start_xmit = skfp_send_pkt,
167 .ndo_get_stats = skfp_ctl_get_stats,
168 .ndo_change_mtu = fddi_change_mtu,
169 .ndo_set_multicast_list = skfp_ctl_set_multicast_list,
170 .ndo_set_mac_address = skfp_ctl_set_mac_address,
171 .ndo_do_ioctl = skfp_ioctl,
172};
173
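/*
 * Added note (not part of the original file): the commit shown at the top of
 * this page converts .ndo_start_xmit handlers to the netdev_tx_t return type,
 * which is why skfp_send_pkt() below returns NETDEV_TX_OK / NETDEV_TX_BUSY
 * instead of 0 / 1.  A minimal, driver-independent sketch of that contract:
 */
#if 0	/* illustration only; the example_* helpers are hypothetical */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (example_tx_ring_full(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* stack keeps the skb and retries */
	}
	example_queue_for_dma(dev, skb);
	return NETDEV_TX_OK;		/* skb is now owned by the driver */
}
#endif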
174/*
175 * =================
176 * = skfp_init_one =
177 * =================
178 *
179 * Overview:
180 * Probes for supported FDDI PCI controllers
181 *
182 * Returns:
183 * Condition code
184 *
185 * Arguments:
186 * pdev - pointer to PCI device information
187 *
188 * Functional Description:
189 * This is now called by PCI driver registration process
190 * for each board found.
191 *
192 * Return Codes:
193 * 0 - This device (fddi0, fddi1, etc) configured successfully
194 * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
195 * present for this device name
196 *
197 *
198 * Side Effects:
199 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
200 * initialized and the board resources are read and stored in
201 * the device structure.
202 */
203static int skfp_init_one(struct pci_dev *pdev,
204 const struct pci_device_id *ent)
205{
206 struct net_device *dev;
207 struct s_smc *smc; /* board pointer */
208 void __iomem *mem;
209 int err;
210
211 pr_debug(KERN_INFO "entering skfp_init_one\n");
212
213 if (num_boards == 0)
214 printk("%s\n", boot_msg);
215
216 err = pci_enable_device(pdev);
217 if (err)
218 return err;
219
220 err = pci_request_regions(pdev, "skfddi");
221 if (err)
222 goto err_out1;
223
224 pci_set_master(pdev);
225
226#ifdef MEM_MAPPED_IO
227 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
228 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
229 err = -EIO;
230 goto err_out2;
231 }
232
233 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
234#else
235 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
236 printk(KERN_ERR "skfp: region is not a PIO resource\n");
237 err = -EIO;
238 goto err_out2;
239 }
240
241 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
242#endif
243 if (!mem) {
244 printk(KERN_ERR "skfp: Unable to map register, "
245 "FDDI adapter will be disabled.\n");
246 err = -EIO;
247 goto err_out2;
248 }
249
250 dev = alloc_fddidev(sizeof(struct s_smc));
251 if (!dev) {
252 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
253 "FDDI adapter will be disabled.\n");
254 err = -ENOMEM;
255 goto err_out3;
256 }
257
258 dev->irq = pdev->irq;
259 dev->netdev_ops = &skfp_netdev_ops;
260
261 SET_NETDEV_DEV(dev, &pdev->dev);
262
263 /* Initialize board structure with bus-specific info */
264 smc = netdev_priv(dev);
265 smc->os.dev = dev;
266 smc->os.bus_type = SK_BUS_TYPE_PCI;
267 smc->os.pdev = *pdev;
268 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
269 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
270 smc->os.dev = dev;
271 smc->hw.slot = -1;
272 smc->hw.iop = mem;
273 smc->os.ResetRequested = FALSE;
274 skb_queue_head_init(&smc->os.SendSkbQueue);
275
276 dev->base_addr = (unsigned long)mem;
277
278 err = skfp_driver_init(dev);
279 if (err)
280 goto err_out4;
281
282 err = register_netdev(dev);
283 if (err)
284 goto err_out5;
285
286 ++num_boards;
287 pci_set_drvdata(pdev, dev);
288
289 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
290 (pdev->subsystem_device & 0xff00) == 0x5800)
291 printk("%s: SysKonnect FDDI PCI adapter"
292 " found (SK-%04X)\n", dev->name,
293 pdev->subsystem_device);
294 else
295 printk("%s: FDDI PCI adapter found\n", dev->name);
296
297 return 0;
298err_out5:
299 if (smc->os.SharedMemAddr)
300 pci_free_consistent(pdev, smc->os.SharedMemSize,
301 smc->os.SharedMemAddr,
302 smc->os.SharedMemDMA);
303 pci_free_consistent(pdev, MAX_FRAME_SIZE,
304 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
305err_out4:
306 free_netdev(dev);
307err_out3:
308#ifdef MEM_MAPPED_IO
309 iounmap(mem);
310#else
311 ioport_unmap(mem);
312#endif
313err_out2:
314 pci_release_regions(pdev);
315err_out1:
316 pci_disable_device(pdev);
317 return err;
318}
319
320/*
321 * Called for each adapter board from pci_unregister_driver
322 */
323static void __devexit skfp_remove_one(struct pci_dev *pdev)
324{
325 struct net_device *p = pci_get_drvdata(pdev);
326 struct s_smc *lp = netdev_priv(p);
327
328 unregister_netdev(p);
329
330 if (lp->os.SharedMemAddr) {
331 pci_free_consistent(&lp->os.pdev,
332 lp->os.SharedMemSize,
333 lp->os.SharedMemAddr,
334 lp->os.SharedMemDMA);
335 lp->os.SharedMemAddr = NULL;
336 }
337 if (lp->os.LocalRxBuffer) {
338 pci_free_consistent(&lp->os.pdev,
339 MAX_FRAME_SIZE,
340 lp->os.LocalRxBuffer,
341 lp->os.LocalRxBufferDMA);
342 lp->os.LocalRxBuffer = NULL;
343 }
344#ifdef MEM_MAPPED_IO
345 iounmap(lp->hw.iop);
346#else
347 ioport_unmap(lp->hw.iop);
348#endif
349 pci_release_regions(pdev);
350 free_netdev(p);
351
352 pci_disable_device(pdev);
353 pci_set_drvdata(pdev, NULL);
354}
355
356/*
357 * ====================
358 * = skfp_driver_init =
359 * ====================
360 *
361 * Overview:
362 * Initializes remaining adapter board structure information
363 * and makes sure adapter is in a safe state prior to skfp_open().
364 *
365 * Returns:
366 * Condition code
367 *
368 * Arguments:
369 * dev - pointer to device information
370 *
371 * Functional Description:
372 * This function allocates additional resources such as the host memory
373 * blocks needed by the adapter.
374 * The adapter is also reset. The OS must call skfp_open() to open
375 * the adapter and bring it on-line.
376 *
377 * Return Codes:
378 * 0 - initialization succeeded
379 * -1 - initialization failed
380 */
381static int skfp_driver_init(struct net_device *dev)
382{
383 struct s_smc *smc = netdev_priv(dev);
384 skfddi_priv *bp = &smc->os;
385 int err = -EIO;
386
387 pr_debug(KERN_INFO "entering skfp_driver_init\n");
388
389 // set the io address in private structures
390 bp->base_addr = dev->base_addr;
391
392 // Get the interrupt level from the PCI Configuration Table
393 smc->hw.irq = dev->irq;
394
395 spin_lock_init(&bp->DriverLock);
396
397 // Allocate invalid frame
398 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
399 if (!bp->LocalRxBuffer) {
400 printk("could not allocate mem for ");
401 printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
402 goto fail;
403 }
404
405 // Determine the required size of the 'shared' memory area.
406 bp->SharedMemSize = mac_drv_check_space();
407 pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
408 if (bp->SharedMemSize > 0) {
409 bp->SharedMemSize += 16; // for descriptor alignment
410
411 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
412 bp->SharedMemSize,
413 &bp->SharedMemDMA);
414 if (!bp->SharedMemAddr) {
415 printk("could not allocate mem for ");
416 printk("hardware module: %ld byte\n",
417 bp->SharedMemSize);
418 goto fail;
419 }
420 bp->SharedMemHeap = 0; // Nothing used yet.
421
422 } else {
423 bp->SharedMemAddr = NULL;
424 bp->SharedMemHeap = 0;
425 } // SharedMemSize > 0
426
427 memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
428
429 card_stop(smc); // Reset adapter.
430
431 pr_debug(KERN_INFO "mac_drv_init()..\n");
432 if (mac_drv_init(smc) != 0) {
433 pr_debug(KERN_INFO "mac_drv_init() failed.\n");
434 goto fail;
435 }
436 read_address(smc, NULL);
437 pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
438 smc->hw.fddi_canon_addr.a[0],
439 smc->hw.fddi_canon_addr.a[1],
440 smc->hw.fddi_canon_addr.a[2],
441 smc->hw.fddi_canon_addr.a[3],
442 smc->hw.fddi_canon_addr.a[4],
443 smc->hw.fddi_canon_addr.a[5]);
444 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
445
446 smt_reset_defaults(smc, 0);
447
448 return (0);
449
450fail:
451 if (bp->SharedMemAddr) {
452 pci_free_consistent(&bp->pdev,
453 bp->SharedMemSize,
454 bp->SharedMemAddr,
455 bp->SharedMemDMA);
456 bp->SharedMemAddr = NULL;
457 }
458 if (bp->LocalRxBuffer) {
459 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
460 bp->LocalRxBuffer, bp->LocalRxBufferDMA);
461 bp->LocalRxBuffer = NULL;
462 }
463 return err;
464} // skfp_driver_init
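/*
 * Added note (not in the original source): the 'shared' area sized here via
 * mac_drv_check_space() is later handed out piecewise by mac_drv_get_space(),
 * which simply advances SharedMemHeap inside the single pci_alloc_consistent()
 * block - a bump allocator with no free operation.  The extra 16 bytes added
 * above exist only so mac_drv_get_desc_mem() can align the descriptor area.
 */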
465
466
467/*
468 * =============
469 * = skfp_open =
470 * =============
471 *
472 * Overview:
473 * Opens the adapter
474 *
475 * Returns:
476 * Condition code
477 *
478 * Arguments:
479 * dev - pointer to device information
480 *
481 * Functional Description:
482 * This function brings the adapter to an operational state.
483 *
484 * Return Codes:
485 * 0 - Adapter was successfully opened
486 * -EAGAIN - Could not register IRQ
487 */
488static int skfp_open(struct net_device *dev)
489{
490 struct s_smc *smc = netdev_priv(dev);
491 int err;
492
493 pr_debug(KERN_INFO "entering skfp_open\n");
494 /* Register IRQ - support shared interrupts by passing device ptr */
495 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
496 dev->name, dev);
497 if (err)
498 return err;
499
500 /*
501 * Set current address to factory MAC address
502 *
503 * Note: We've already done this step in skfp_driver_init.
504 * However, it's possible that a user has set a node
505 * address override, then closed and reopened the
506 * adapter. Unless we reset the device address field
507 * now, we'll continue to use the existing modified
508 * address.
509 */
510 read_address(smc, NULL);
511 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
512
513 init_smt(smc, NULL);
514 smt_online(smc, 1);
515 STI_FBI();
516
517 /* Clear local multicast address tables */
518 mac_clear_multicast(smc);
519
520 /* Disable promiscuous filter settings */
521 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
522
523 netif_start_queue(dev);
524 return (0);
525} // skfp_open
526
527
528/*
529 * ==============
530 * = skfp_close =
531 * ==============
532 *
533 * Overview:
534 * Closes the device/module.
535 *
536 * Returns:
537 * Condition code
538 *
539 * Arguments:
540 * dev - pointer to device information
541 *
542 * Functional Description:
543 * This routine closes the adapter and brings it to a safe state.
544 * The interrupt service routine is deregistered with the OS.
545 * The adapter can be opened again with another call to skfp_open().
546 *
547 * Return Codes:
548 * Always return 0.
549 *
550 * Assumptions:
551 * No further requests for this adapter are made after this routine is
552 * called. skfp_open() can be called to reset and reinitialize the
553 * adapter.
554 */
555static int skfp_close(struct net_device *dev)
556{
557 struct s_smc *smc = netdev_priv(dev);
558 skfddi_priv *bp = &smc->os;
559
560 CLI_FBI();
561 smt_reset_defaults(smc, 1);
562 card_stop(smc);
563 mac_drv_clear_tx_queue(smc);
564 mac_drv_clear_rx_queue(smc);
565
566 netif_stop_queue(dev);
567 /* Deregister (free) IRQ */
568 free_irq(dev->irq, dev);
569
570 skb_queue_purge(&bp->SendSkbQueue);
571 bp->QueueSkb = MAX_TX_QUEUE_LEN;
572
573 return (0);
574} // skfp_close
575
576
577/*
578 * ==================
579 * = skfp_interrupt =
580 * ==================
581 *
582 * Overview:
583 * Interrupt processing routine
584 *
585 * Returns:
586 * None
587 *
588 * Arguments:
589 * irq - interrupt vector
590 * dev_id - pointer to device information
591 *
592 * Functional Description:
593 * This routine calls the interrupt processing routine for this adapter. It
594 * disables and reenables adapter interrupts, as appropriate. We can support
595 * shared interrupts since the incoming dev_id pointer provides our device
596 * structure context. All the real work is done in the hardware module.
597 *
598 * Return Codes:
599 * None
600 *
601 * Assumptions:
602 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
603 * on Intel-based systems) is done by the operating system outside this
604 * routine.
605 *
606 * System interrupts are enabled through this call.
607 *
608 * Side Effects:
609 * Interrupts are disabled, then reenabled at the adapter.
610 */
611
612static irqreturn_t skfp_interrupt(int irq, void *dev_id)
613{
614 struct net_device *dev = dev_id;
615 struct s_smc *smc; /* private board structure pointer */
616 skfddi_priv *bp;
617
618 smc = netdev_priv(dev);
619 bp = &smc->os;
620
621 // IRQs enabled or disabled ?
622 if (inpd(ADDR(B0_IMSK)) == 0) {
623 // IRQs are disabled: must be shared interrupt
624 return IRQ_NONE;
625 }
626 // Note: At this point, IRQs are enabled.
627 if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
628 // Adapter did not issue an IRQ: must be shared interrupt
629 return IRQ_NONE;
630 }
631 CLI_FBI(); // Disable IRQs from our adapter.
632 spin_lock(&bp->DriverLock);
633
634 // Call interrupt handler in hardware module (HWM).
635 fddi_isr(smc);
636
637 if (smc->os.ResetRequested) {
638 ResetAdapter(smc);
639 smc->os.ResetRequested = FALSE;
640 }
641 spin_unlock(&bp->DriverLock);
642 STI_FBI(); // Enable IRQs from our adapter.
643
644 return IRQ_HANDLED;
645} // skfp_interrupt
646
647
648/*
649 * ======================
650 * = skfp_ctl_get_stats =
651 * ======================
652 *
653 * Overview:
654 * Get statistics for FDDI adapter
655 *
656 * Returns:
657 * Pointer to FDDI statistics structure
658 *
659 * Arguments:
660 * dev - pointer to device information
661 *
662 * Functional Description:
663 * Gets current MIB objects from adapter, then
664 * returns FDDI statistics structure as defined
665 * in if_fddi.h.
666 *
667 * Note: Since the FDDI statistics structure is
668 * still new and the device structure doesn't
669 * have an FDDI-specific get statistics handler,
670 * we'll return the FDDI statistics structure as
671 * a pointer to an Ethernet statistics structure.
672 * That way, at least the first part of the statistics
673 * structure can be decoded properly.
674 * We'll have to pay attention to this routine as the
675 * device structure becomes more mature and LAN media
676 * independent.
677 *
678 */
679static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
680{
681 struct s_smc *bp = netdev_priv(dev);
682
683 /* Fill the bp->stats structure with driver-maintained counters */
684
685 bp->os.MacStat.port_bs_flag[0] = 0x1234;
686 bp->os.MacStat.port_bs_flag[1] = 0x5678;
687// goos: need to fill out fddi statistic
688#if 0
689 /* Get FDDI SMT MIB objects */
690
691/* Fill the bp->stats structure with the SMT MIB object values */
692
693 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
694 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
695 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
696 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
697 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
698 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
699 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
700 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
701 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
702 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
703 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
704 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
705 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
706 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
707 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
708 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
709 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
710 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
711 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
712 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
713 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
714 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
715 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
716 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
717 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
718 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
719 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
720 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
721 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
722 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
723 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
724 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
725 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
726 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
727 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
728 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
729 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
730 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
731 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
732 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
733 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
734 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
735 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
736 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
737 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
738 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
739 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
740 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
741 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
742 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
743 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
744 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
745 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
746 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
747 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
748 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
749 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
750 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
751 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
752 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
753 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
754 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
755 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
756 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
757 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
758 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
759 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
760 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
761 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
762 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
763 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
764 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
765 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
766 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
767 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
768 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
769 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
770 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
771 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
772 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
773 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
774 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
775 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
776 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
777 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
778 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
779 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
780 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
781 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
782 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
783 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
784 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
785
786
787 /* Fill the bp->stats structure with the FDDI counter values */
788
789 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
790 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
791 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
792 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
793 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
794 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
795 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
796 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
797 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
798 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
799 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
800
801#endif
802 return ((struct net_device_stats *) &bp->os.MacStat);
803} // ctl_get_stat
804
805
806/*
807 * ==============================
808 * = skfp_ctl_set_multicast_list =
809 * ==============================
810 *
811 * Overview:
812 * Enable/Disable LLC frame promiscuous mode reception
813 * on the adapter and/or update multicast address table.
814 *
815 * Returns:
816 * None
817 *
818 * Arguments:
819 * dev - pointer to device information
820 *
821 * Functional Description:
822 * This function acquires the driver lock and then calls
823 * skfp_ctl_set_multicast_list_wo_lock to do the actual work.
824 * This routine follows a fairly simple algorithm for setting the
825 * adapter filters and CAM:
826 *
827 * if IFF_PROMISC flag is set
828 * enable promiscuous mode
829 * else
830 * disable promiscuous mode
831 * if number of multicast addresses <= max. multicast number
832 * add mc addresses to adapter table
833 * else
834 * enable promiscuous mode
835 * update adapter filters
836 *
837 * Assumptions:
838 * Multicast addresses are presented in canonical (LSB) format.
839 *
840 * Side Effects:
841 * On-board adapter filters are updated.
842 */
843static void skfp_ctl_set_multicast_list(struct net_device *dev)
844{
845 struct s_smc *smc = netdev_priv(dev);
846 skfddi_priv *bp = &smc->os;
847 unsigned long Flags;
848
849 spin_lock_irqsave(&bp->DriverLock, Flags);
850 skfp_ctl_set_multicast_list_wo_lock(dev);
851 spin_unlock_irqrestore(&bp->DriverLock, Flags);
852 return;
853} // skfp_ctl_set_multicast_list
854
855
856
857static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
858{
859 struct s_smc *smc = netdev_priv(dev);
860 struct dev_mc_list *dmi; /* ptr to multicast addr entry */
861 int i;
862
863 /* Enable promiscuous mode, if necessary */
864 if (dev->flags & IFF_PROMISC) {
865 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
866 pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
867 }
868 /* Else, update multicast address table */
869 else {
870 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
871 pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
872
873 // Reset all MC addresses
874 mac_clear_multicast(smc);
875 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
876
877 if (dev->flags & IFF_ALLMULTI) {
878 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
879 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
880 } else if (dev->mc_count > 0) {
881 if (dev->mc_count <= FPMAX_MULTICAST) {
882 /* use exact filtering */
883
884 // point to first multicast addr
885 dmi = dev->mc_list;
886
887 for (i = 0; i < dev->mc_count; i++) {
888 mac_add_multicast(smc,
889 (struct fddi_addr *)dmi->dmi_addr,
890 1);
891
892 pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
893 pr_debug(" %02x %02x %02x ",
894 dmi->dmi_addr[0],
895 dmi->dmi_addr[1],
896 dmi->dmi_addr[2]);
897 pr_debug("%02x %02x %02x\n",
898 dmi->dmi_addr[3],
899 dmi->dmi_addr[4],
900 dmi->dmi_addr[5]);
901 dmi = dmi->next;
902 } // for
903
904 } else { // more MC addresses than HW supports
905
906 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
907 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
908 }
909 } else { // no MC addresses
910
911 pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
912 }
913
914 /* Update adapter filters */
915 mac_update_multicast(smc);
916 }
917 return;
918} // skfp_ctl_set_multicast_list_wo_lock
919
920
921/*
922 * ===========================
923 * = skfp_ctl_set_mac_address =
924 * ===========================
925 *
926 * Overview:
927 * set new mac address on adapter and update dev_addr field in device table.
928 *
929 * Returns:
930 * 0 - always (the new address is accepted unconditionally)
931 *
932 * Arguments:
933 * dev - pointer to device information
934 * addr - pointer to sockaddr structure containing unicast address to set
935 *
936 * Assumptions:
937 * The address pointed to by addr->sa_data is a valid unicast
938 * address and is presented in canonical (LSB) format.
939 */
940static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
941{
942 struct s_smc *smc = netdev_priv(dev);
943 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
944 skfddi_priv *bp = &smc->os;
945 unsigned long Flags;
946
947
948 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
949 spin_lock_irqsave(&bp->DriverLock, Flags);
950 ResetAdapter(smc);
951 spin_unlock_irqrestore(&bp->DriverLock, Flags);
952
953 return (0); /* always return zero */
954} // skfp_ctl_set_mac_address
955
956
957/*
958 * ==============
959 * = skfp_ioctl =
960 * ==============
961 *
962 * Overview:
963 *
964 * Perform IOCTL call functions here. Some are privileged operations and
965 * the CAP_NET_ADMIN capability is required in those cases.
966 *
967 * Returns:
968 * status value
969 * 0 - success
970 * other - failure
971 *
972 * Arguments:
973 * dev - pointer to device information
974 * rq - pointer to ioctl request structure
975 * cmd - ioctl command code (unused here; the sub-command is read from ifr_data)
976 *
977 */
978
979
980static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
981{
982 struct s_smc *smc = netdev_priv(dev);
983 skfddi_priv *lp = &smc->os;
984 struct s_skfp_ioctl ioc;
985 int status = 0;
986
987 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
988 return -EFAULT;
989
990 switch (ioc.cmd) {
991 case SKFP_GET_STATS: /* Get the driver statistics */
992 ioc.len = sizeof(lp->MacStat);
993 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
994 ? -EFAULT : 0;
995 break;
996 case SKFP_CLR_STATS: /* Zero out the driver statistics */
997 if (!capable(CAP_NET_ADMIN)) {
998 status = -EPERM;
999 } else {
1000 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
1001 }
1002 break;
1003 default:
1004 printk("ioctl for %s: unknow cmd: %04x\n", dev->name, ioc.cmd);
1005 status = -EOPNOTSUPP;
1006
1007 } // switch
1008
1009 return status;
1010} // skfp_ioctl
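/*
 * Added illustration (not part of the original file): how the private ioctl
 * above is typically reached from user space.  The cmd/len/data layout below
 * mirrors the s_skfp_ioctl fields used above, but the mirrored struct, the
 * SIOCDEVPRIVATE entry point and the SKFP_GET_STATS value are assumptions of
 * this sketch taken from the driver's private headers, not defined here.
 */
#if 0	/* user-space sketch */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>

struct skfp_ioctl_req {		/* assumed mirror of the driver's s_skfp_ioctl */
	unsigned short cmd;
	unsigned short len;
	unsigned char *data;
};

static int skfp_get_stats_example(const char *ifname, void *buf,
				  unsigned short len)
{
	struct skfp_ioctl_req ioc;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = SKFP_GET_STATS;	/* value comes from the driver headers */
	ioc.len = len;
	ioc.data = buf;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ioc;
	ret = ioctl(fd, SIOCDEVPRIVATE, &ifr);	/* assumed entry point */
	close(fd);
	return ret;
}
#endif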
1011
1012
1013/*
1014 * =====================
1015 * = skfp_send_pkt =
1016 * =====================
1017 *
1018 * Overview:
1019 * Queues a packet for transmission and tries to transmit it.
1020 *
1021 * Returns:
1022 * Condition code
1023 *
1024 * Arguments:
1025 * skb - pointer to sk_buff to queue for transmission
1026 * dev - pointer to device information
1027 *
1028 * Functional Description:
1029 * Here we assume that an incoming skb transmit request
1030 * is contained in a single physically contiguous buffer
1031 * in which the virtual address of the start of packet
1032 * (skb->data) can be converted to a physical address
1033 * by using pci_map_single().
1034 *
1035 * We have an internal queue for packets we can not send
1036 * immediately. Packets in this queue can be given to the
1037 * adapter if transmit buffers are freed.
1038 *
1039 * We can't free the skb until after it's been DMA'd
1040 * out by the adapter, so we'll keep it in the driver and
1041 * return it in mac_drv_tx_complete.
1042 *
1043 * Return Codes:
1044 * NETDEV_TX_OK - driver has queued and/or sent the packet
1045 * NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
1046 *
1047 * Assumptions:
1048 * The entire packet is stored in one physically
1049 * contiguous buffer which is not cached and whose
1050 * 32-bit physical address can be determined.
1051 *
1052 * It's vital that this routine is NOT reentered for the
1053 * same board and that the OS is not in another section of
1054 * code (eg. skfp_interrupt) for the same board on a
1055 * different thread.
1056 *
1057 * Side Effects:
1058 * None
1059 */
1060static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1061 struct net_device *dev)
1062{
1063 struct s_smc *smc = netdev_priv(dev);
1064 skfddi_priv *bp = &smc->os;
1065
1066 pr_debug(KERN_INFO "skfp_send_pkt\n");
1067
1068 /*
1069 * Verify that incoming transmit request is OK
1070 *
1071 * Note: The packet size check is consistent with other
1072 * Linux device drivers, although the correct packet
1073 * size should be verified before calling the
1074 * transmit routine.
1075 */
1076
1077 if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1078 bp->MacStat.gen.tx_errors++; /* bump error counter */
1079 // dequeue packets from xmt queue and send them
1080 netif_start_queue(dev);
1081 dev_kfree_skb(skb);
1082 return NETDEV_TX_OK; /* return "success" */
1083 }
1084 if (bp->QueueSkb == 0) { // return with tbusy set: queue full
1085
1086 netif_stop_queue(dev);
1087 return NETDEV_TX_BUSY;
1088 }
1089 bp->QueueSkb--;
1090 skb_queue_tail(&bp->SendSkbQueue, skb);
1091 send_queued_packets(netdev_priv(dev));
1092 if (bp->QueueSkb == 0) {
1093 netif_stop_queue(dev);
1094 }
1095 dev->trans_start = jiffies;
1096 return NETDEV_TX_OK;
1097
1098} // skfp_send_pkt
1099
1100
1101/*
1102 * =======================
1103 * = send_queued_packets =
1104 * =======================
1105 *
1106 * Overview:
1107 * Send packets from the driver queue as long as there are some and
1108 * transmit resources are available.
1109 *
1110 * Returns:
1111 * None
1112 *
1113 * Arguments:
1114 * smc - pointer to smc (adapter) structure
1115 *
1116 * Functional Description:
1117 * Take a packet from queue if there is any. If not, then we are done.
1118 * Check if there are resources to send the packet. If not, requeue it
1119 * and exit.
1120 * Set packet descriptor flags and give packet to adapter.
1121 * Check if any send resources can be freed (we do not use the
1122 * transmit complete interrupt).
1123 */
1124static void send_queued_packets(struct s_smc *smc)
1125{
1126 skfddi_priv *bp = &smc->os;
1127 struct sk_buff *skb;
1128 unsigned char fc;
1129 int queue;
1130 struct s_smt_fp_txd *txd; // Current TxD.
1131 dma_addr_t dma_address;
1132 unsigned long Flags;
1133
1134 int frame_status; // HWM tx frame status.
1135
1136 pr_debug(KERN_INFO "send queued packets\n");
1137 for (;;) {
1138 // send first buffer from queue
1139 skb = skb_dequeue(&bp->SendSkbQueue);
1140
1141 if (!skb) {
1142 pr_debug(KERN_INFO "queue empty\n");
1143 return;
1144 } // queue empty !
1145
1146 spin_lock_irqsave(&bp->DriverLock, Flags);
1147 fc = skb->data[0];
1148 queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
1149#ifdef ESS
1150 // Check if the frame may/must be sent as a synchronous frame.
1151
1152 if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1153 // It's an LLC frame.
1154 if (!smc->ess.sync_bw_available)
1155 fc &= ~FC_SYNC_BIT; // No bandwidth available.
1156
1157 else { // Bandwidth is available.
1158
1159 if (smc->mib.fddiESSSynchTxMode) {
1160 // Send as sync. frame.
1161 fc |= FC_SYNC_BIT;
1162 }
1163 }
1164 }
1165#endif // ESS
1166 frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1167
1168 if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1169 // Unable to send the frame.
1170
1171 if ((frame_status & RING_DOWN) != 0) {
1172 // Ring is down.
1173 pr_debug("Tx attempt while ring down.\n");
1174 } else if ((frame_status & OUT_OF_TXD) != 0) {
1175 pr_debug("%s: out of TXDs.\n", bp->dev->name);
1176 } else {
1177 pr_debug("%s: out of transmit resources",
1178 bp->dev->name);
1179 }
1180
1181 // Note: We will retry the operation as soon as
1182 // transmit resources become available.
1183 skb_queue_head(&bp->SendSkbQueue, skb);
1184 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1185 return; // Packet has been queued.
1186
1187 } // if (unable to send frame)
1188
1189 bp->QueueSkb++; // one packet less in local queue
1190
1191 // source address in packet ?
1192 CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1193
1194 txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1195
1196 dma_address = pci_map_single(&bp->pdev, skb->data,
1197 skb->len, PCI_DMA_TODEVICE);
1198 if (frame_status & LAN_TX) {
1199 txd->txd_os.skb = skb; // save skb
1200 txd->txd_os.dma_addr = dma_address; // save dma mapping
1201 }
1202 hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1203 frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1204
1205 if (!(frame_status & LAN_TX)) { // local only frame
1206 pci_unmap_single(&bp->pdev, dma_address,
1207 skb->len, PCI_DMA_TODEVICE);
1208 dev_kfree_skb_irq(skb);
1209 }
1210 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1211 } // for
1212
1213 return; // never reached
1214
1215} // send_queued_packets
1216
1217
1218/************************
1219 *
1220 * CheckSourceAddress
1221 *
1222 * Verify if the source address is set. Insert it if necessary.
1223 *
1224 ************************/
1225static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1226{
1227 unsigned char SRBit;
1228
1229 if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
1230
1231 return;
1232 if ((unsigned short) frame[1 + 10] != 0)
1233 return;
1234 SRBit = frame[1 + 6] & 0x01;
1235 memcpy(&frame[1 + 6], hw_addr, 6);
1236 frame[8] |= SRBit;
1237} // CheckSourceAddress
1238
1239
1240/************************
1241 *
1242 * ResetAdapter
1243 *
1244 * Reset the adapter and bring it back to operational mode.
1245 * Args
1246 * smc - A pointer to the SMT context struct.
1247 * Out
1248 * Nothing.
1249 *
1250 ************************/
1251static void ResetAdapter(struct s_smc *smc)
1252{
1253
1254 pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
1255
1256 // Stop the adapter.
1257
1258 card_stop(smc); // Stop all activity.
1259
1260 // Clear the transmit and receive descriptor queues.
1261 mac_drv_clear_tx_queue(smc);
1262 mac_drv_clear_rx_queue(smc);
1263
1264 // Restart the adapter.
1265
1266 smt_reset_defaults(smc, 1); // Initialize the SMT module.
1267
1268 init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
1269
1270 smt_online(smc, 1); // Insert into the ring again.
1271 STI_FBI();
1272
1273 // Restore original receive mode (multicasts, promiscuous, etc.).
1274 skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1275} // ResetAdapter
1276
1277
1278//--------------- functions called by hardware module ----------------
1279
1280/************************
1281 *
1282 * llc_restart_tx
1283 *
1284 * The hardware driver calls this routine when the transmit complete
1285 * interrupt bits (end of frame) for the synchronous or asynchronous
1286 * queue is set.
1287 *
1288 * NOTE The hardware driver calls this function also if no packets are queued.
1289 * The routine must be able to handle this case.
1290 * Args
1291 * smc - A pointer to the SMT context struct.
1292 * Out
1293 * Nothing.
1294 *
1295 ************************/
1296void llc_restart_tx(struct s_smc *smc)
1297{
1298 skfddi_priv *bp = &smc->os;
1299
1300 pr_debug(KERN_INFO "[llc_restart_tx]\n");
1301
1302 // Try to send queued packets
1303 spin_unlock(&bp->DriverLock);
1304 send_queued_packets(smc);
1305 spin_lock(&bp->DriverLock);
1306 netif_start_queue(bp->dev);// system may send again if it was blocked
1307
1308} // llc_restart_tx
1309
1310
1311/************************
1312 *
1313 * mac_drv_get_space
1314 *
1315 * The hardware module calls this function to allocate the memory
1316 * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1317 * Args
1318 * smc - A pointer to the SMT context struct.
1319 *
1320 * size - Size of memory in bytes to allocate.
1321 * Out
1322 * != 0 A pointer to the virtual address of the allocated memory.
1323 * == 0 Allocation error.
1324 *
1325 ************************/
1326void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1327{
1328 void *virt;
1329
1330 pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
1331 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1332
1333 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1334 printk("Unexpected SMT memory size requested: %d\n", size);
1335 return (NULL);
1336 }
1337 smc->os.SharedMemHeap += size; // Move heap pointer.
1338
1339 pr_debug(KERN_INFO "mac_drv_get_space end\n");
1340 pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
1341 pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
1342 (smc->os.SharedMemDMA +
1343 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1344 return (virt);
1345} // mac_drv_get_space
1346
1347
1348/************************
1349 *
1350 * mac_drv_get_desc_mem
1351 *
1352 * This function is called by the hardware dependent module.
1353 * It allocates the memory for the RxD and TxD descriptors.
1354 *
1355 * This memory must be non-cached, non-movable and non-swappable.
1356 * This memory should start at a physical page boundary.
1357 * Args
1358 * smc - A pointer to the SMT context struct.
1359 *
1360 * size - Size of memory in bytes to allocate.
1361 * Out
1362 * != 0 A pointer to the virtual address of the allocated memory.
1363 * == 0 Allocation error.
1364 *
1365 ************************/
1366void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1367{
1368
1369 char *virt;
1370
1371 pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
1372
1373 // Descriptor memory must be aligned on 16-byte boundary.
1374
1375 virt = mac_drv_get_space(smc, size);
1376
1377 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1378 size = size % 16;
1379
1380 pr_debug("Allocate %u bytes alignment gap ", size);
1381 pr_debug("for descriptor memory.\n");
1382
1383 if (!mac_drv_get_space(smc, size)) {
1384 printk("fddi: Unable to align descriptor memory.\n");
1385 return (NULL);
1386 }
1387 return (virt + size);
1388} // mac_drv_get_desc_mem
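/*
 * Worked example for the alignment gap computed above (added note, not in
 * the original source): if mac_drv_get_space() hands back an address ending
 * in 0x...6, then 16 - (0x6 & 15) = 10 and 10 % 16 = 10, so a 10-byte gap is
 * consumed and the descriptors start 16-byte aligned at 0x...10.  If the
 * address is already aligned, (16 - 0) % 16 = 0 and no gap is allocated.
 */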
1389
1390
1391/************************
1392 *
1393 * mac_drv_virt2phys
1394 *
1395 * Get the physical address of a given virtual address.
1396 * Args
1397 * smc - A pointer to the SMT context struct.
1398 *
1399 * virt - A (virtual) pointer into our 'shared' memory area.
1400 * Out
1401 * Physical address of the given virtual address.
1402 *
1403 ************************/
1404unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1405{
1406 return (smc->os.SharedMemDMA +
1407 ((char *) virt - (char *)smc->os.SharedMemAddr));
1408} // mac_drv_virt2phys
1409
1410
1411/************************
1412 *
1413 * dma_master
1414 *
1415 * The HWM calls this function when the driver performs a DMA
1416 * transfer. If the OS-specific module must prepare the system hardware
1417 * for the DMA transfer, it should do it in this function.
1418 *
1419 * The hardware module calls this dma_master if it wants to send an SMT
1420 * frame. This means that the virt address passed in here is part of
1421 * the 'shared' memory area.
1422 * Args
1423 * smc - A pointer to the SMT context struct.
1424 *
1425 * virt - The virtual address of the data.
1426 *
1427 * len - The length in bytes of the data.
1428 *
1429 * flag - Indicates the transmit direction and the buffer type:
1430 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1431 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1432 * SMT_BUF (0x80) SMT buffer
1433 *
1434 * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1435 * Out
1436 * Returns the physical address for the DMA transfer.
1437 *
1438 ************************/
1439u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1440{
1441 return (smc->os.SharedMemDMA +
1442 ((char *) virt - (char *)smc->os.SharedMemAddr));
1443} // dma_master
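/*
 * Added note (not in the original source): mac_drv_virt2phys() and
 * dma_master() both translate a pointer into the consistent 'shared' area by
 * plain offset arithmetic: bus = SharedMemDMA + (virt - SharedMemAddr).  For
 * example, a pointer 0x240 bytes into the area maps to SharedMemDMA + 0x240.
 * This only works because the whole area is one pci_alloc_consistent() block.
 */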
1444
1445
1446/************************
1447 *
1448 * dma_complete
1449 *
1450 * The hardware module calls this routine when it has completed a DMA
1451 * transfer. If the operating system dependent module has set up the DMA
1452 * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1453 * the DMA channel.
1454 * Args
1455 * smc - A pointer to the SMT context struct.
1456 *
1457 * descr - A pointer to a TxD or RxD, respectively.
1458 *
1459 * flag - Indicates the DMA transfer direction / SMT buffer:
1460 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1461 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1462 * SMT_BUF (0x80) SMT buffer (managed by HWM)
1463 * Out
1464 * Nothing.
1465 *
1466 ************************/
1467void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1468{
1469 /* For TX buffers, there are two cases. If it is an SMT transmit
1470 * buffer, there is nothing to do since we use consistent memory
1471 * for the 'shared' memory area. The other case is for normal
1472 * transmit packets given to us by the networking stack, and in
1473 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1474 * below.
1475 *
1476 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1477 * because the hardware module is about to potentially look at
1478 * the contents of the buffer. If we did not call the PCI DMA
1479 * unmap first, the hardware module could read inconsistent data.
1480 */
1481 if (flag & DMA_WR) {
1482 skfddi_priv *bp = &smc->os;
1483 volatile struct s_smt_fp_rxd *r = &descr->r;
1484
1485 /* If SKB is NULL, we used the local buffer. */
1486 if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1487 int MaxFrameSize = bp->MaxFrameSize;
1488
1489 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1490 MaxFrameSize, PCI_DMA_FROMDEVICE);
1491 r->rxd_os.dma_addr = 0;
1492 }
1493 }
1494} // dma_complete
1495
1496
1497/************************
1498 *
1499 * mac_drv_tx_complete
1500 *
1501 * Transmit of a packet is complete. Release the tx staging buffer.
1502 *
1503 * Args
1504 * smc - A pointer to the SMT context struct.
1505 *
1506 * txd - A pointer to the last TxD which is used by the frame.
1507 * Out
1508 * Returns nothing.
1509 *
1510 ************************/
1511void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1512{
1513 struct sk_buff *skb;
1514
1515 pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
1516 // Check if this TxD points to a skb
1517
1518 if (!(skb = txd->txd_os.skb)) {
1519 pr_debug("TXD with no skb assigned.\n");
1520 return;
1521 }
1522 txd->txd_os.skb = NULL;
1523
1524 // release the DMA mapping
1525 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1526 skb->len, PCI_DMA_TODEVICE);
1527 txd->txd_os.dma_addr = 0;
1528
1529 smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1530 smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1531
1532 // free the skb
1533 dev_kfree_skb_irq(skb);
1534
1535 pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
1536} // mac_drv_tx_complete
1537
1538
1539/************************
1540 *
1541 * dump packets to logfile
1542 *
1543 ************************/
1544#ifdef DUMPPACKETS
1545void dump_data(unsigned char *Data, int length)
1546{
1547 int i, j;
1548 unsigned char s[255], sh[10];
1549 if (length > 64) {
1550 length = 64;
1551 }
1552 printk(KERN_INFO "---Packet start---\n");
1553 for (i = 0, j = 0; i < length / 8; i++, j += 8)
1554 printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
1555 Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
1556 Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
1557 strcpy(s, "");
1558 for (i = 0; i < length % 8; i++) {
1559 sprintf(sh, "%02x ", Data[j + i]);
1560 strcat(s, sh);
1561 }
1562 printk(KERN_INFO "%s\n", s);
1563 printk(KERN_INFO "------------------\n");
1564} // dump_data
1565#else
1566#define dump_data(data,len)
1567#endif // DUMPPACKETS
1568
1569/************************
1570 *
1571 * mac_drv_rx_complete
1572 *
1573 * The hardware module calls this function if an LLC frame is received
1574 * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1575 * from the network will be passed to the LLC layer by this function
1576 * if passing is enabled.
1577 *
1578 * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1579 * be received. It also fills the RxD ring with new receive buffers if
1580 * some can be queued.
1581 * Args
1582 * smc - A pointer to the SMT context struct.
1583 *
1584 * rxd - A pointer to the first RxD which is used by the receive frame.
1585 *
1586 * frag_count - Count of RxDs used by the received frame.
1587 *
1588 * len - Frame length.
1589 * Out
1590 * Nothing.
1591 *
1592 ************************/
1593void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1594 int frag_count, int len)
1595{
1596 skfddi_priv *bp = &smc->os;
1597 struct sk_buff *skb;
1598 unsigned char *virt, *cp;
1599 unsigned short ri;
1600 u_int RifLength;
1601
1602 pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
1603 if (frag_count != 1) { // This is not allowed to happen.
1604
1605 printk("fddi: Multi-fragment receive!\n");
1606 goto RequeueRxd; // Re-use the given RXD(s).
1607
1608 }
1609 skb = rxd->rxd_os.skb;
1610 if (!skb) {
1611 pr_debug(KERN_INFO "No skb in rxd\n");
1612 smc->os.MacStat.gen.rx_errors++;
1613 goto RequeueRxd;
1614 }
1615 virt = skb->data;
1616
1617 // The DMA mapping was released in dma_complete above.
1618
1619 dump_data(skb->data, len);
1620
1621 /*
1622 * FDDI Frame format:
1623 * +-------+-------+-------+------------+--------+------------+
1624 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
1625 * +-------+-------+-------+------------+--------+------------+
1626 *
1627 * FC = Frame Control
1628 * DA = Destination Address
1629 * SA = Source Address
1630 * RIF = Routing Information Field
1631 * LLC = Logical Link Control
1632 */
1633
1634 // Remove Routing Information Field (RIF), if present.
1635
1636 if ((virt[1 + 6] & FDDI_RII) == 0)
1637 RifLength = 0;
1638 else {
1639 int n;
1640// goos: RIF removal has still to be tested
1641 pr_debug(KERN_INFO "RIF found\n");
1642 // Get RIF length from Routing Control (RC) field.
1643 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1644
1645 ri = ntohs(*((__be16 *) cp));
1646 RifLength = ri & FDDI_RCF_LEN_MASK;
1647 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1648 printk("fddi: Invalid RIF.\n");
1649 goto RequeueRxd; // Discard the frame.
1650
1651 }
1652 virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
1653 // regions overlap
1654
1655 virt = cp + RifLength;
1656 for (n = FDDI_MAC_HDR_LEN; n; n--)
1657 *--virt = *--cp;
1658 // adjust sbd->data pointer
1659 skb_pull(skb, RifLength);
1660 len -= RifLength;
1661 RifLength = 0;
1662 }
1663
1664 // Count statistics.
1665 smc->os.MacStat.gen.rx_packets++; // Count indicated receive
1666 // packets.
1667 smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
1668
1669 // virt points to header again
1670 if (virt[1] & 0x01) { // Check group (multicast) bit.
1671
1672 smc->os.MacStat.gen.multicast++;
1673 }
1674
1675 // deliver frame to system
1676 rxd->rxd_os.skb = NULL;
1677 skb_trim(skb, len);
1678 skb->protocol = fddi_type_trans(skb, bp->dev);
1679
1680 netif_rx(skb);
1681
1682 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1683 return;
1684
1685 RequeueRxd:
ebc06eeb 1686	pr_debug("Rx: re-queue RXD.\n");
1687 mac_drv_requeue_rxd(smc, rxd, frag_count);
1688 smc->os.MacStat.gen.rx_errors++; // Count receive packets
1689 // not indicated.
1690
1691} // mac_drv_rx_complete
1692
1693
1694/************************
1695 *
1696 * mac_drv_requeue_rxd
1697 *
1698 * The hardware module calls this function to request the OS-specific
1699 * module to queue the receive buffer(s) represented by the pointer
1700 * to the RxD and the frag_count into the receive queue again. This
1701 * buffer was filled with an invalid frame or an SMT frame.
1702 * Args
1703 * smc - A pointer to the SMT context struct.
1704 *
1705 * rxd - A pointer to the first RxD which is used by the receive frame.
1706 *
1707 * frag_count - Count of RxDs used by the received frame.
1708 * Out
1709 * Nothing.
1710 *
1711 ************************/
1712void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1713 int frag_count)
1714{
1715 volatile struct s_smt_fp_rxd *next_rxd;
1716 volatile struct s_smt_fp_rxd *src_rxd;
1717 struct sk_buff *skb;
1718 int MaxFrameSize;
1719 unsigned char *v_addr;
1720 dma_addr_t b_addr;
1721
1722 if (frag_count != 1) // This is not allowed to happen.
1723
1724 printk("fddi: Multi-fragment requeue!\n");
1725
1726 MaxFrameSize = smc->os.MaxFrameSize;
1727 src_rxd = rxd;
1728 for (; frag_count > 0; frag_count--) {
1729 next_rxd = src_rxd->rxd_next;
1730 rxd = HWM_GET_CURR_RXD(smc);
1731
1732 skb = src_rxd->rxd_os.skb;
1733 if (skb == NULL) { // this should not happen
1734
ebc06eeb 1735 pr_debug("Requeue with no skb in rxd!\n");
1736 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1737 if (skb) {
1738 // we got a skb
1739 rxd->rxd_os.skb = skb;
1740 skb_reserve(skb, 3);
1741 skb_put(skb, MaxFrameSize);
1742 v_addr = skb->data;
1743 b_addr = pci_map_single(&smc->os.pdev,
1744 v_addr,
1745 MaxFrameSize,
1746 PCI_DMA_FROMDEVICE);
1747 rxd->rxd_os.dma_addr = b_addr;
1748 } else {
1749 // no skb available, use local buffer
ebc06eeb 1750 pr_debug("Queueing invalid buffer!\n");
1751 rxd->rxd_os.skb = NULL;
1752 v_addr = smc->os.LocalRxBuffer;
1753 b_addr = smc->os.LocalRxBufferDMA;
1754 }
1755 } else {
1756 // we use skb from old rxd
1757 rxd->rxd_os.skb = skb;
1758 v_addr = skb->data;
1759 b_addr = pci_map_single(&smc->os.pdev,
1760 v_addr,
1761 MaxFrameSize,
1762 PCI_DMA_FROMDEVICE);
1763 rxd->rxd_os.dma_addr = b_addr;
1764 }
1765 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1766 FIRST_FRAG | LAST_FRAG);
1767
1768 src_rxd = next_rxd;
1769 }
1770} // mac_drv_requeue_rxd
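/*
 * Sketch of the per-descriptor decision made above (a restatement of the
 * code, not additional driver logic):
 *
 *	if (old skb present)        reuse it and remap it for DMA;
 *	else if (alloc_skb() works) queue the fresh skb;
 *	else                        queue the shared LocalRxBuffer so the
 *	                            receiver keeps running, and treat any
 *	                            data landing there as invalid.
 */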
1771
1772
1773/************************
1774 *
1775 * mac_drv_fill_rxd
1776 *
1777 * The hardware module calls this function at initialization time
1778 * to fill the RxD ring with receive buffers. It is also called by
1779 * mac_drv_rx_complete if rx_free is large enough to queue some new
1780 * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1781 * receive buffers as long as enough RxDs and receive buffers are
1782 * available.
1783 * Args
1784 * smc - A pointer to the SMT context struct.
1785 * Out
1786 * Nothing.
1787 *
1788 ************************/
1789void mac_drv_fill_rxd(struct s_smc *smc)
1790{
1791 int MaxFrameSize;
1792 unsigned char *v_addr;
 1793	dma_addr_t b_addr;
1794 struct sk_buff *skb;
1795 volatile struct s_smt_fp_rxd *rxd;
1796
ebc06eeb 1797	pr_debug("entering mac_drv_fill_rxd\n");
1798
1799 // Walk through the list of free receive buffers, passing receive
1800 // buffers to the HWM as long as RXDs are available.
1801
1802 MaxFrameSize = smc->os.MaxFrameSize;
1803 // Check if there is any RXD left.
1804 while (HWM_GET_RX_FREE(smc) > 0) {
ebc06eeb 1805		pr_debug(".\n");
1806
1807 rxd = HWM_GET_CURR_RXD(smc);
1808 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1809 if (skb) {
1810 // we got a skb
1811 skb_reserve(skb, 3);
1812 skb_put(skb, MaxFrameSize);
1813 v_addr = skb->data;
1814 b_addr = pci_map_single(&smc->os.pdev,
1815 v_addr,
1816 MaxFrameSize,
1817 PCI_DMA_FROMDEVICE);
1818 rxd->rxd_os.dma_addr = b_addr;
1819 } else {
1820 // no skb available, use local buffer
1821 // System has run out of buffer memory, but we want to
1822 // keep the receiver running in hope of better times.
1823 // Multiple descriptors may point to this local buffer,
1824 // so data in it must be considered invalid.
ebc06eeb 1825 pr_debug("Queueing invalid buffer!\n");
1826 v_addr = smc->os.LocalRxBuffer;
1827 b_addr = smc->os.LocalRxBufferDMA;
1828 }
1829
1830 rxd->rxd_os.skb = skb;
1831
1832 // Pass receive buffer to HWM.
1833 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1834 FIRST_FRAG | LAST_FRAG);
1835 }
ebc06eeb 1836	pr_debug("leaving mac_drv_fill_rxd\n");
1837} // mac_drv_fill_rxd
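/*
 * Buffer sizing note: each receive buffer is MaxFrameSize bytes, allocated
 * as MaxFrameSize + 3 with skb_reserve(skb, 3). The 3-byte reserve is
 * presumably there so that the data behind the 13-byte FDDI MAC header
 * starts on a four-byte boundary (3 + 13 = 16); the hardware module only
 * ever sees the MaxFrameSize data area handed over via hwm_rx_frag().
 */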
1838
1839
1840/************************
1841 *
1842 * mac_drv_clear_rxd
1843 *
1844 * The hardware module calls this function to release unused
1845 * receive buffers.
1846 * Args
1847 * smc - A pointer to the SMT context struct.
1848 *
1849 * rxd - A pointer to the first RxD which is used by the receive buffer.
1850 *
1851 * frag_count - Count of RxDs used by the receive buffer.
1852 * Out
1853 * Nothing.
1854 *
1855 ************************/
1856void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1857 int frag_count)
1858{
1859
1860 struct sk_buff *skb;
1861
ebc06eeb 1862 pr_debug("entering mac_drv_clear_rxd\n");
1863
1864 if (frag_count != 1) // This is not allowed to happen.
1865
1866 printk("fddi: Multi-fragment clear!\n");
1867
1868 for (; frag_count > 0; frag_count--) {
1869 skb = rxd->rxd_os.skb;
1870 if (skb != NULL) {
1871 skfddi_priv *bp = &smc->os;
1872 int MaxFrameSize = bp->MaxFrameSize;
1873
1874 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1875 MaxFrameSize, PCI_DMA_FROMDEVICE);
1876
1877 dev_kfree_skb(skb);
1878 rxd->rxd_os.skb = NULL;
1879 }
1880 rxd = rxd->rxd_next; // Next RXD.
1881
1882 }
1883} // mac_drv_clear_rxd
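/*
 * Note on the ordering above: the streaming mapping set up with
 * pci_map_single() when the buffer was queued is released with
 * pci_unmap_single() before the skb is freed. Only descriptors that still
 * own an skb are touched; descriptors pointing at LocalRxBuffer have no
 * per-descriptor mapping to undo.
 */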
1884
1885
1886/************************
1887 *
1888 * mac_drv_rx_init
1889 *
1890 * The hardware module calls this routine when an SMT or NSA frame of the
1891 * local SMT should be delivered to the LLC layer.
1892 *
1893 * It is necessary to have this function, because there is no other way to
1894 * copy the contents of SMT MBufs into receive buffers.
1895 *
1896 * mac_drv_rx_init allocates the required target memory for this frame,
1897 * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
1898 * Args
1899 * smc - A pointer to the SMT context struct.
1900 *
1901 * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1902 *
1903 * fc - The Frame Control field of the received frame.
1904 *
1905 * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1906 *
1907 * la_len - The length of the lookahead data stored in the lookahead
1908 * buffer (may be zero).
1909 * Out
1910 * Always returns zero (0).
1911 *
1912 ************************/
1913int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1914 char *look_ahead, int la_len)
1915{
1916 struct sk_buff *skb;
1917
ebc06eeb 1918 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1919
 1920	// "Received" an SMT or NSA frame of the local SMT.
1921
1922 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1923 pr_debug("fddi: Discard invalid local SMT frame\n");
1924 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1925 len, la_len, (unsigned long) look_ahead);
1926 return (0);
1927 }
1928 skb = alloc_skb(len + 3, GFP_ATOMIC);
1929 if (!skb) {
ebc06eeb 1930 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1931 return (0);
1932 }
1933 skb_reserve(skb, 3);
1934 skb_put(skb, len);
27d7ff46 1935 skb_copy_to_linear_data(skb, look_ahead, len);
1936
1937 // deliver frame to system
1938 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1939 netif_rx(skb);
1940
1941 return (0);
1942} // mac_drv_rx_init
1943
1944
1945/************************
1946 *
1947 * smt_timer_poll
1948 *
1949 * This routine is called periodically by the SMT module to clean up the
1950 * driver.
1951 *
1952 * Return any queued frames back to the upper protocol layers if the ring
1953 * is down.
1954 * Args
1955 * smc - A pointer to the SMT context struct.
1956 * Out
1957 * Nothing.
1958 *
1959 ************************/
1960void smt_timer_poll(struct s_smc *smc)
1961{
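	/* Nothing to clean up in this driver; intentionally left empty. */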
1962} // smt_timer_poll
1963
1964
1965/************************
1966 *
1967 * ring_status_indication
1968 *
1969 * This function indicates a change of the ring state.
1970 * Args
1971 * smc - A pointer to the SMT context struct.
1972 *
1973 * status - The current ring status.
1974 * Out
1975 * Nothing.
1976 *
1977 ************************/
1978void ring_status_indication(struct s_smc *smc, u_long status)
1979{
ebc06eeb 1980 pr_debug("ring_status_indication( ");
1da177e4 1981 if (status & RS_RES15)
ebc06eeb 1982 pr_debug("RS_RES15 ");
1da177e4 1983 if (status & RS_HARDERROR)
ebc06eeb 1984 pr_debug("RS_HARDERROR ");
1da177e4 1985 if (status & RS_SOFTERROR)
ebc06eeb 1986 pr_debug("RS_SOFTERROR ");
1da177e4 1987 if (status & RS_BEACON)
ebc06eeb 1988 pr_debug("RS_BEACON ");
1da177e4 1989 if (status & RS_PATHTEST)
ebc06eeb 1990 pr_debug("RS_PATHTEST ");
1da177e4 1991 if (status & RS_SELFTEST)
ebc06eeb 1992 pr_debug("RS_SELFTEST ");
1da177e4 1993 if (status & RS_RES9)
ebc06eeb 1994 pr_debug("RS_RES9 ");
1da177e4 1995 if (status & RS_DISCONNECT)
ebc06eeb 1996 pr_debug("RS_DISCONNECT ");
1da177e4 1997 if (status & RS_RES7)
ebc06eeb 1998 pr_debug("RS_RES7 ");
1da177e4 1999 if (status & RS_DUPADDR)
ebc06eeb 2000 pr_debug("RS_DUPADDR ");
1da177e4 2001 if (status & RS_NORINGOP)
ebc06eeb 2002 pr_debug("RS_NORINGOP ");
1da177e4 2003 if (status & RS_VERSION)
ebc06eeb 2004 pr_debug("RS_VERSION ");
1da177e4 2005 if (status & RS_STUCKBYPASSS)
ebc06eeb 2006 pr_debug("RS_STUCKBYPASSS ");
1da177e4 2007 if (status & RS_EVENT)
ebc06eeb 2008 pr_debug("RS_EVENT ");
1da177e4 2009 if (status & RS_RINGOPCHANGE)
ebc06eeb 2010 pr_debug("RS_RINGOPCHANGE ");
1da177e4 2011 if (status & RS_RES0)
2012 pr_debug("RS_RES0 ");
 2013	pr_debug(")\n");
2014} // ring_status_indication
2015
2016
2017/************************
2018 *
2019 * smt_get_time
2020 *
2021 * Gets the current time from the system.
2022 * Args
2023 * None.
2024 * Out
 2025 *	The current time, expressed in timer ticks.
 2026 *
 2027 * TICKS_PER_SECOND is the number of timer ticks per second. It is
 2028 * defined in "targetos.h" and must match the time base of the value
 2029 * returned by smt_get_time().
2030 *
2031 ************************/
2032unsigned long smt_get_time(void)
2033{
2034 return jiffies;
2035} // smt_get_time
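/*
 * Since this returns jiffies, TICKS_PER_SECOND in "targetos.h" is expected
 * to equal HZ. Illustrative conversion under that assumption:
 *
 *	u_long two_seconds_in_ticks = 2 * TICKS_PER_SECOND;	// == 2 * HZ
 */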
2036
2037
2038/************************
2039 *
2040 * smt_stat_counter
2041 *
2042 * Status counter update (ring_op, fifo full).
2043 * Args
2044 * smc - A pointer to the SMT context struct.
2045 *
2046 * stat - = 0: A ring operational change occurred.
2047 * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2048 * Out
2049 * Nothing.
2050 *
2051 ************************/
2052void smt_stat_counter(struct s_smc *smc, int stat)
2053{
2054// BOOLEAN RingIsUp ;
2055
ebc06eeb 2056	pr_debug("smt_stat_counter\n");
2057 switch (stat) {
2058 case 0:
ebc06eeb 2059		pr_debug("Ring operational change.\n");
2060 break;
2061 case 1:
ebc06eeb 2062		pr_debug("Receive fifo overflow.\n");
2063 smc->os.MacStat.gen.rx_errors++;
2064 break;
2065 default:
ebc06eeb 2066		pr_debug("Unknown status (%d).\n", stat);
2067 break;
2068 }
2069} // smt_stat_counter
2070
2071
2072/************************
2073 *
2074 * cfm_state_change
2075 *
2076 * Sets CFM state in custom statistics.
2077 * Args
2078 * smc - A pointer to the SMT context struct.
2079 *
2080 * c_state - Possible values are:
2081 *
 2082 *	SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
 2083 *	SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
2084 * Out
2085 * Nothing.
2086 *
2087 ************************/
2088void cfm_state_change(struct s_smc *smc, int c_state)
2089{
2090#ifdef DRIVERDEBUG
2091 char *s;
2092
2093 switch (c_state) {
2094 case SC0_ISOLATED:
2095 s = "SC0_ISOLATED";
2096 break;
2097 case SC1_WRAP_A:
2098 s = "SC1_WRAP_A";
2099 break;
2100 case SC2_WRAP_B:
2101 s = "SC2_WRAP_B";
2102 break;
2103 case SC4_THRU_A:
2104 s = "SC4_THRU_A";
2105 break;
2106 case SC5_THRU_B:
2107 s = "SC5_THRU_B";
2108 break;
2109 case SC7_WRAP_S:
2110 s = "SC7_WRAP_S";
2111 break;
2112 case SC9_C_WRAP_A:
2113 s = "SC9_C_WRAP_A";
2114 break;
2115 case SC10_C_WRAP_B:
2116 s = "SC10_C_WRAP_B";
2117 break;
2118 case SC11_C_WRAP_S:
2119 s = "SC11_C_WRAP_S";
2120 break;
2121 default:
ebc06eeb 2122		pr_debug("cfm_state_change: unknown %d\n", c_state);
2123 return;
2124 }
ebc06eeb 2125	pr_debug("cfm_state_change: %s\n", s);
2126#endif // DRIVERDEBUG
2127} // cfm_state_change
2128
2129
2130/************************
2131 *
2132 * ecm_state_change
2133 *
2134 * Sets ECM state in custom statistics.
2135 * Args
2136 * smc - A pointer to the SMT context struct.
2137 *
2138 * e_state - Possible values are:
2139 *
 2140 *	EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
 2141 *	EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2142 * Out
2143 * Nothing.
2144 *
2145 ************************/
2146void ecm_state_change(struct s_smc *smc, int e_state)
2147{
2148#ifdef DRIVERDEBUG
2149 char *s;
2150
2151 switch (e_state) {
2152 case EC0_OUT:
2153 s = "EC0_OUT";
2154 break;
2155 case EC1_IN:
2156 s = "EC1_IN";
2157 break;
2158 case EC2_TRACE:
2159 s = "EC2_TRACE";
2160 break;
2161 case EC3_LEAVE:
2162 s = "EC3_LEAVE";
2163 break;
2164 case EC4_PATH_TEST:
2165 s = "EC4_PATH_TEST";
2166 break;
2167 case EC5_INSERT:
2168 s = "EC5_INSERT";
2169 break;
2170 case EC6_CHECK:
2171 s = "EC6_CHECK";
2172 break;
2173 case EC7_DEINSERT:
2174 s = "EC7_DEINSERT";
2175 break;
2176 default:
2177 s = "unknown";
2178 break;
2179 }
ebc06eeb 2180	pr_debug("ecm_state_change: %s\n", s);
2181#endif //DRIVERDEBUG
2182} // ecm_state_change
2183
2184
2185/************************
2186 *
2187 * rmt_state_change
2188 *
2189 * Sets RMT state in custom statistics.
2190 * Args
2191 * smc - A pointer to the SMT context struct.
2192 *
2193 * r_state - Possible values are:
2194 *
2195 * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2196 * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2197 * Out
2198 * Nothing.
2199 *
2200 ************************/
2201void rmt_state_change(struct s_smc *smc, int r_state)
2202{
2203#ifdef DRIVERDEBUG
2204 char *s;
2205
2206 switch (r_state) {
2207 case RM0_ISOLATED:
2208 s = "RM0_ISOLATED";
2209 break;
2210 case RM1_NON_OP:
2211 s = "RM1_NON_OP - not operational";
2212 break;
2213 case RM2_RING_OP:
2214 s = "RM2_RING_OP - ring operational";
2215 break;
2216 case RM3_DETECT:
2217 s = "RM3_DETECT - detect dupl addresses";
2218 break;
2219 case RM4_NON_OP_DUP:
2220 s = "RM4_NON_OP_DUP - dupl. addr detected";
2221 break;
2222 case RM5_RING_OP_DUP:
2223 s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
2224 break;
2225 case RM6_DIRECTED:
2226 s = "RM6_DIRECTED - sending directed beacons";
2227 break;
2228 case RM7_TRACE:
2229 s = "RM7_TRACE - trace initiated";
2230 break;
2231 default:
2232 s = "unknown";
2233 break;
2234 }
ebc06eeb 2235	pr_debug("[rmt_state_change: %s]\n", s);
2236#endif // DRIVERDEBUG
2237} // rmt_state_change
2238
2239
2240/************************
2241 *
2242 * drv_reset_indication
2243 *
2244 * This function is called by the SMT when it has detected a severe
2245 * hardware problem. The driver should perform a reset on the adapter
2246 * as soon as possible, but not from within this function.
2247 * Args
2248 * smc - A pointer to the SMT context struct.
2249 * Out
2250 * Nothing.
2251 *
2252 ************************/
2253void drv_reset_indication(struct s_smc *smc)
2254{
ebc06eeb 2255	pr_debug("entering drv_reset_indication\n");
2256
2257 smc->os.ResetRequested = TRUE; // Set flag.
2258
2259} // drv_reset_indication
2260
2261static struct pci_driver skfddi_pci_driver = {
2262 .name = "skfddi",
2263 .id_table = skfddi_pci_tbl,
2264 .probe = skfp_init_one,
2265 .remove = __devexit_p(skfp_remove_one),
2266};
2267
2268static int __init skfd_init(void)
2269{
29917620 2270 return pci_register_driver(&skfddi_pci_driver);
2271}
2272
2273static void __exit skfd_exit(void)
2274{
2275 pci_unregister_driver(&skfddi_pci_driver);
2276}
2277
2278module_init(skfd_init);
2279module_exit(skfd_exit);