/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
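/* each CLK_LOW/CLK_HIGH pair in the tables above clocks one bit to or
   from the serial EEPROM, with the data bit presented on SI while the
   clock is low.  readtab deasserts and then asserts chip select (which
   appears to be active low here) and shifts out the 8-bit READ opcode
   (0000 0011b) msb first; clocktab just supplies the bare clock edges
   used while shifting the addressed bytes back in, one bit per pair */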

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
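/* e.g. with 10 vci bits configured, vpi 1/vci 32 becomes
   cid = ((1 << 10) | 32) & 0x1fff = 0x420 */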

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
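/* with byte enables 0 through 2 disabled, the write above touches only
   the most significant byte (bits 31:24) of TSR4, as the note from
   page 2-20 requires while the connection is active */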

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
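/* e.g. 100 cps: 100 << 9 = 51200 is halved six times down to 800,
   so exp = 6 and the result is NONZERO | (6 << 9) | (800 & 0x1ff);
   decoding 2^6 * (512 + 288) / 512 recovers the original 100 cps
   (the mantissa's leading bit is implied, hence the & 0x1ff) */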

static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
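	/* e.g. for a 155 card, clock = 50000000 and rate starts at
	   ATM_OC3_PCR (353207 cps), so the first reload value is
	   50000000 / 353207 = 141 clock cycles; each subsequent entry
	   steps the rate down by a fixed delta of link_rate/32 */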

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */
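	/* e.g. rate_atmf = 0x1ff gives exp = 0xf and man = 0x1f0, so
	   rate_cps = 2^15 * (496 + 512) / 512 = 64512 cps, which the
	   loop below maps to the nearest rate grid entry */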

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;

	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
1011 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1012 return -EINVAL;
1013 }
1014
1015 he_dev->irq = he_dev->pci_dev->irq;
1016
1017 return 0;
1018}
1019
static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
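/* i.e. 1 + 16 + 192 = 209 pci clock cycles */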
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 * local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes       0 _______________________2047 bytes
	 *         |            |                 |                   |   |
	 *         |  utility   |                 |        rx0        |   |
	 *        5|____________|              255|___________________| u |
	 *        6|            |              256|                   | t |
	 *         |            |                 |                   | i |
	 *         |    rx0     |              row|        tx         | l |
	 *         |            |                 |                   | i |
	 *         |            |              767|___________________| t |
	 *      517|____________|              768|                   | y |
	 * row  518|            |                 |        rx1        |   |
	 *         |            |             1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |     tx            |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
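/* the last 8 bytes of an aal5 pdu are the trailer: uu, cpi, a 16-bit
   length and the 32-bit crc.  AAL5_LEN() reads the length field at
   bytes len-6/len-5; TCP_CKSUM() reads the checksum the adapter can
   substitute into the low 16 bits of the crc at len-2/len-1 */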

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

 1888 __net_timestamp(skb);
1889
1890 for (iov = he_vcc->iov_head;
1891 iov < he_vcc->iov_tail; ++iov) {
1892#ifdef USE_RBPS
1893 if (iov->iov_base & RBP_SMALLBUF)
1894 memcpy(skb_put(skb, iov->iov_len),
1895 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1896 else
1897#endif
1898 memcpy(skb_put(skb, iov->iov_len),
1899 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1900 }
1901
1902 switch (vcc->qos.aal) {
1903 case ATM_AAL0:
1904 /* 2.10.1.5 raw cell receive */
1905 skb->len = ATM_AAL0_SDU;
1906 skb->tail = skb->data + skb->len;
1907 break;
1908 case ATM_AAL5:
1909 /* 2.10.1.2 aal5 receive */
1910
1911 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1912 skb->tail = skb->data + skb->len;
1913#ifdef USE_CHECKSUM_HW
1914 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1915 skb->ip_summed = CHECKSUM_HW;
1916 skb->csum = TCP_CKSUM(skb->data,
1917 he_vcc->pdu_len);
1918 }
1919#endif
1920 break;
1921 }
1922
1923#ifdef should_never_happen
1924 if (skb->len > vcc->qos.rxtp.max_sdu)
1925 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1926#endif
1927
1928#ifdef notdef
1929 ATM_SKB(skb)->vcc = vcc;
1930#endif
1931 vcc->push(vcc, skb);
1932
1933 atomic_inc(&vcc->stats->rx);
1934
1935return_host_buffers:
1936 ++pdus_assembled;
1937
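 /* hand every buffer of this pdu back to its pool by clearing
 RBP_LOANED; he_service_rbpl()/he_service_rbps() will loan the
 cleared entries out to the adapter again */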
1938 for (iov = he_vcc->iov_head;
1939 iov < he_vcc->iov_tail; ++iov) {
1940#ifdef USE_RBPS
1941 if (iov->iov_base & RBP_SMALLBUF)
1942 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1943 else
1944#endif
1945 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1946
1947 rbp->status &= ~RBP_LOANED;
1948 }
1949
1950 he_vcc->iov_tail = he_vcc->iov_head;
1951 he_vcc->pdu_len = 0;
1952
1953next_rbrq_entry:
1954 he_dev->rbrq_head = (struct he_rbrq *)
1955 ((unsigned long) he_dev->rbrq_base |
1956 RBRQ_MASK(++he_dev->rbrq_head));
1957
1958 }
1959 read_unlock(&vcc_sklist_lock);
1960
1961 if (updated) {
1962 if (updated > he_dev->rbrq_peak)
1963 he_dev->rbrq_peak = updated;
1964
1965 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1966 G0_RBRQ_H + (group * 16));
1967 }
1968
1969 return pdus_assembled;
1970}
1971
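/*
 * he_service_tbrq -- drain the transmit buffer return queue: for each
 * completed tpd, unmap its dma buffers, pop (or free) the attached
 * skb, and release the tpd; an EOS entry instead signals a finished
 * close, waking the closer sleeping on tx_waitq.
 */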
1972static void
1973he_service_tbrq(struct he_dev *he_dev, int group)
1974{
1975 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1976 ((unsigned long)he_dev->tbrq_base |
1977 he_dev->hsp->group[group].tbrq_tail);
1978 struct he_tpd *tpd;
1979 int slot, updated = 0;
1980#ifdef USE_TPD_POOL
1981 struct he_tpd *__tpd;
1982#endif
1983
1984 /* 2.1.6 transmit buffer return queue */
1985
1986 while (he_dev->tbrq_head != tbrq_tail) {
1987 ++updated;
1988
1989 HPRINTK("tbrq%d 0x%x%s%s\n",
1990 group,
1991 TBRQ_TPD(he_dev->tbrq_head),
1992 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1993 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1994#ifdef USE_TPD_POOL
1995 tpd = NULL;
1996 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1997 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1998 tpd = __tpd;
1999 list_del(&__tpd->entry);
2000 break;
2001 }
2002 }
2003
2004 if (tpd == NULL) {
2005 hprintk("unable to locate tpd for dma buffer %x\n",
2006 TBRQ_TPD(he_dev->tbrq_head));
2007 goto next_tbrq_entry;
2008 }
2009#else
2010 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2011#endif
2012
 2013 if (TBRQ_EOS(he_dev->tbrq_head)) {
 2014 if (tpd->vcc) {
 2015 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
 2016 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
 2017 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
 2018 }
 2019 goto next_tbrq_entry;
 2020 }
2021
2022 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2023 if (tpd->iovec[slot].addr)
2024 pci_unmap_single(he_dev->pci_dev,
2025 tpd->iovec[slot].addr,
2026 tpd->iovec[slot].len & TPD_LEN_MASK,
2027 PCI_DMA_TODEVICE);
2028 if (tpd->iovec[slot].len & TPD_LST)
2029 break;
2030
2031 }
2032
2033 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2034 if (tpd->vcc && tpd->vcc->pop)
2035 tpd->vcc->pop(tpd->vcc, tpd->skb);
2036 else
2037 dev_kfree_skb_any(tpd->skb);
2038 }
2039
2040next_tbrq_entry:
2041#ifdef USE_TPD_POOL
2042 if (tpd)
2043 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2044#else
2045 tpd->inuse = 0;
2046#endif
2047 he_dev->tbrq_head = (struct he_tbrq *)
2048 ((unsigned long) he_dev->tbrq_base |
2049 TBRQ_MASK(++he_dev->tbrq_head));
2050 }
2051
2052 if (updated) {
2053 if (updated > he_dev->tbrq_peak)
2054 he_dev->tbrq_peak = updated;
2055
2056 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2057 G0_TBRQ_H + (group * 16));
2058 }
2059}
2060
2061
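/*
 * he_service_rbpl -- replenish the large receive buffer pool: advance
 * our tail toward the adapter's head, marking each free entry
 * RBP_LOANED, stopping at the head or at the first entry still out
 * on loan (table 3.42: the tail must never catch the head).
 */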
2062static void
2063he_service_rbpl(struct he_dev *he_dev, int group)
2064{
2065 struct he_rbp *newtail;
2066 struct he_rbp *rbpl_head;
2067 int moved = 0;
2068
2069 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2070 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2071
2072 for (;;) {
2073 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2074 RBPL_MASK(he_dev->rbpl_tail+1));
2075
2076 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2077 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2078 break;
2079
2080 newtail->status |= RBP_LOANED;
2081 he_dev->rbpl_tail = newtail;
2082 ++moved;
2083 }
2084
2085 if (moved)
2086 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2087}
2088
2089#ifdef USE_RBPS
2090static void
2091he_service_rbps(struct he_dev *he_dev, int group)
2092{
2093 struct he_rbp *newtail;
2094 struct he_rbp *rbps_head;
2095 int moved = 0;
2096
2097 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2098 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2099
2100 for (;;) {
2101 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2102 RBPS_MASK(he_dev->rbps_tail+1));
2103
2104 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2105 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2106 break;
2107
2108 newtail->status |= RBP_LOANED;
2109 he_dev->rbps_tail = newtail;
2110 ++moved;
2111 }
2112
2113 if (moved)
2114 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2115}
2116#endif /* USE_RBPS */
2117
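/*
 * he_tasklet -- deferred interrupt work: consume type/group words
 * from the interrupt queue and dispatch to the matching rbrq/tbrq/
 * rbp service routine (or the phy handler, with global_lock dropped
 * around the callout), then write the tail back so the adapter can
 * reuse the queue entries.
 */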
2118static void
2119he_tasklet(unsigned long data)
2120{
2121 unsigned long flags;
2122 struct he_dev *he_dev = (struct he_dev *) data;
2123 int group, type;
2124 int updated = 0;
2125
2126 HPRINTK("tasklet (0x%lx)\n", data);
2127#ifdef USE_TASKLET
2128 spin_lock_irqsave(&he_dev->global_lock, flags);
2129#endif
2130
2131 while (he_dev->irq_head != he_dev->irq_tail) {
2132 ++updated;
2133
2134 type = ITYPE_TYPE(he_dev->irq_head->isw);
2135 group = ITYPE_GROUP(he_dev->irq_head->isw);
2136
2137 switch (type) {
2138 case ITYPE_RBRQ_THRESH:
2139 HPRINTK("rbrq%d threshold\n", group);
2140 /* fall through */
2141 case ITYPE_RBRQ_TIMER:
2142 if (he_service_rbrq(he_dev, group)) {
2143 he_service_rbpl(he_dev, group);
2144#ifdef USE_RBPS
2145 he_service_rbps(he_dev, group);
2146#endif /* USE_RBPS */
2147 }
2148 break;
2149 case ITYPE_TBRQ_THRESH:
2150 HPRINTK("tbrq%d threshold\n", group);
2151 /* fall through */
2152 case ITYPE_TPD_COMPLETE:
2153 he_service_tbrq(he_dev, group);
2154 break;
2155 case ITYPE_RBPL_THRESH:
2156 he_service_rbpl(he_dev, group);
2157 break;
2158 case ITYPE_RBPS_THRESH:
2159#ifdef USE_RBPS
2160 he_service_rbps(he_dev, group);
2161#endif /* USE_RBPS */
2162 break;
2163 case ITYPE_PHY:
2164 HPRINTK("phy interrupt\n");
2165#ifdef CONFIG_ATM_HE_USE_SUNI
2166 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2167 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2168 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2169 spin_lock_irqsave(&he_dev->global_lock, flags);
2170#endif
2171 break;
2172 case ITYPE_OTHER:
2173 switch (type|group) {
2174 case ITYPE_PARITY:
2175 hprintk("parity error\n");
2176 break;
2177 case ITYPE_ABORT:
2178 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2179 break;
2180 }
2181 break;
2182 case ITYPE_TYPE(ITYPE_INVALID):
2183 /* see 8.1.1 -- check all queues */
2184
2185 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2186
2187 he_service_rbrq(he_dev, 0);
2188 he_service_rbpl(he_dev, 0);
2189#ifdef USE_RBPS
2190 he_service_rbps(he_dev, 0);
2191#endif /* USE_RBPS */
2192 he_service_tbrq(he_dev, 0);
2193 break;
2194 default:
2195 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2196 }
2197
2198 he_dev->irq_head->isw = ITYPE_INVALID;
2199
2200 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2201 }
2202
2203 if (updated) {
2204 if (updated > he_dev->irq_peak)
2205 he_dev->irq_peak = updated;
2206
2207 he_writel(he_dev,
2208 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2209 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2210 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2211 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2212 }
2213#ifdef USE_TASKLET
2214 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2215#endif
2216}
2217
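/*
 * the hard irq handler does the minimum: latch the adapter's
 * interrupt queue tail, schedule the tasklet (or run it inline
 * without USE_TASKLET), and clear/flush the interrupt through
 * INT_FIFO per the 8.1.2 errata.
 */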
2218static irqreturn_t
2219he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2220{
2221 unsigned long flags;
2222 struct he_dev *he_dev = (struct he_dev * )dev_id;
2223 int handled = 0;
2224
2225 if (he_dev == NULL)
2226 return IRQ_NONE;
2227
2228 spin_lock_irqsave(&he_dev->global_lock, flags);
2229
2230 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2231 (*he_dev->irq_tailoffset << 2));
2232
2233 if (he_dev->irq_tail == he_dev->irq_head) {
2234 HPRINTK("tailoffset not updated?\n");
2235 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2236 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2237 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2238 }
2239
2240#ifdef DEBUG
2241 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2242 hprintk("spurious (or shared) interrupt?\n");
2243#endif
2244
2245 if (he_dev->irq_head != he_dev->irq_tail) {
2246 handled = 1;
2247#ifdef USE_TASKLET
2248 tasklet_schedule(&he_dev->tasklet);
2249#else
2250 he_tasklet((unsigned long) he_dev);
2251#endif
2252 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2253 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2254 }
2255 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2256 return IRQ_RETVAL(handled);
2257
2258}
2259
2260static __inline__ void
2261__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2262{
2263 struct he_tpdrq *new_tail;
2264
2265 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2266 tpd, cid, he_dev->tpdrq_tail);
2267
2268 /* new_tail = he_dev->tpdrq_tail; */
2269 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2270 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2271
2272 /*
2273 * check to see if we are about to set the tail == head
2274 * if true, update the head pointer from the adapter
2275 * to see if this is really the case (reading the queue
2276 * head for every enqueue would be unnecessarily slow)
2277 */
2278
2279 if (new_tail == he_dev->tpdrq_head) {
2280 he_dev->tpdrq_head = (struct he_tpdrq *)
2281 (((unsigned long)he_dev->tpdrq_base) |
2282 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2283
2284 if (new_tail == he_dev->tpdrq_head) {
2285 int slot;
2286
2287 hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 /*
2289 * FIXME
2290 * push tpd onto a transmit backlog queue
2291 * after service_tbrq, service the backlog
2292 * for now, we just drop the pdu
2293 */
2294 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2295 if (tpd->iovec[slot].addr)
2296 pci_unmap_single(he_dev->pci_dev,
2297 tpd->iovec[slot].addr,
2298 tpd->iovec[slot].len & TPD_LEN_MASK,
2299 PCI_DMA_TODEVICE);
2300 }
2301 if (tpd->skb) {
2302 if (tpd->vcc->pop)
2303 tpd->vcc->pop(tpd->vcc, tpd->skb);
2304 else
2305 dev_kfree_skb_any(tpd->skb);
2306 atomic_inc(&tpd->vcc->stats->tx_err);
2307 }
2308#ifdef USE_TPD_POOL
2309 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2310#else
2311 tpd->inuse = 0;
2312#endif
2313 return;
2314 }
2315 }
2316
2317 /* 2.1.5 transmit packet descriptor ready queue */
2318#ifdef USE_TPD_POOL
2319 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2320 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2321#else
2322 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2323 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2324#endif
2325 he_dev->tpdrq_tail->cid = cid;
2326 wmb();
2327
2328 he_dev->tpdrq_tail = new_tail;
2329
2330 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2331 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2332}
2333
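/*
 * he_open -- per-connection setup: derive the cid from vpi/vci, then
 * program the transmit side (tsr0..tsr14 -- traffic class, aal type,
 * and for cbr a shared rate-controller slot) and/or the receive side
 * (rsr1/rsr4 then rsr0, whose open bit must be written last per
 * section 5.1.11).
 */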
2334static int
2335he_open(struct atm_vcc *vcc)
2336{
2337 unsigned long flags;
2338 struct he_dev *he_dev = HE_DEV(vcc->dev);
2339 struct he_vcc *he_vcc;
2340 int err = 0;
2341 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2342 short vpi = vcc->vpi;
2343 int vci = vcc->vci;
2344
2345 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2346 return 0;
2347
2348 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2349
2350 set_bit(ATM_VF_ADDR, &vcc->flags);
2351
2352 cid = he_mkcid(he_dev, vpi, vci);
2353
 2354 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 if (he_vcc == NULL) {
2356 hprintk("unable to allocate he_vcc during open\n");
2357 return -ENOMEM;
2358 }
2359
2360 he_vcc->iov_tail = he_vcc->iov_head;
2361 he_vcc->pdu_len = 0;
2362 he_vcc->rc_index = -1;
2363
2364 init_waitqueue_head(&he_vcc->rx_waitq);
2365 init_waitqueue_head(&he_vcc->tx_waitq);
2366
2367 vcc->dev_data = he_vcc;
2368
2369 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2370 int pcr_goal;
2371
2372 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2373 if (pcr_goal == 0)
2374 pcr_goal = he_dev->atm_dev->link_rate;
2375 if (pcr_goal < 0) /* means round down, technically */
2376 pcr_goal = -pcr_goal;
2377
2378 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2379
2380 switch (vcc->qos.aal) {
2381 case ATM_AAL5:
2382 tsr0_aal = TSR0_AAL5;
2383 tsr4 = TSR4_AAL5;
2384 break;
2385 case ATM_AAL0:
2386 tsr0_aal = TSR0_AAL0_SDU;
2387 tsr4 = TSR4_AAL0_SDU;
2388 break;
2389 default:
2390 err = -EINVAL;
2391 goto open_failed;
2392 }
2393
2394 spin_lock_irqsave(&he_dev->global_lock, flags);
2395 tsr0 = he_readl_tsr0(he_dev, cid);
2396 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2397
2398 if (TSR0_CONN_STATE(tsr0) != 0) {
2399 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2400 err = -EBUSY;
2401 goto open_failed;
2402 }
2403
2404 switch (vcc->qos.txtp.traffic_class) {
2405 case ATM_UBR:
2406 /* 2.3.3.1 open connection ubr */
2407
2408 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2409 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2410 break;
2411
2412 case ATM_CBR:
2413 /* 2.3.3.2 open connection cbr */
2414
2415 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2416 if ((he_dev->total_bw + pcr_goal)
2417 > (he_dev->atm_dev->link_rate * 9 / 10))
2418 {
2419 err = -EBUSY;
2420 goto open_failed;
2421 }
2422
2423 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2424
2425 /* find an unused cs_stper register */
2426 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2427 if (he_dev->cs_stper[reg].inuse == 0 ||
2428 he_dev->cs_stper[reg].pcr == pcr_goal)
2429 break;
2430
2431 if (reg == HE_NUM_CS_STPER) {
2432 err = -EBUSY;
2433 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2434 goto open_failed;
2435 }
2436
2437 he_dev->total_bw += pcr_goal;
2438
2439 he_vcc->rc_index = reg;
2440 ++he_dev->cs_stper[reg].inuse;
2441 he_dev->cs_stper[reg].pcr = pcr_goal;
2442
2443 clock = he_is622(he_dev) ? 66667000 : 50000000;
2444 period = clock / pcr_goal;
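 /* period is the cell interval in clock ticks; e.g. a pcr_goal
 of 353207 cells/s (oc-3 rate) on a 622 card gives
 66667000 / 353207 = 188, and half that period, encoded by
 rate_to_atmf(), is what lands in the CS_STPER register below */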
2445
2446 HPRINTK("rc_index = %d period = %d\n",
2447 reg, period);
2448
2449 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2450 CS_STPER0 + reg);
2451 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2452
2453 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2454 TSR0_RC_INDEX(reg);
2455
2456 break;
2457 default:
2458 err = -EINVAL;
2459 goto open_failed;
2460 }
2461
2462 spin_lock_irqsave(&he_dev->global_lock, flags);
2463
2464 he_writel_tsr0(he_dev, tsr0, cid);
2465 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2466 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2467 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2468 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2469 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2470
2471 he_writel_tsr3(he_dev, 0x0, cid);
2472 he_writel_tsr5(he_dev, 0x0, cid);
2473 he_writel_tsr6(he_dev, 0x0, cid);
2474 he_writel_tsr7(he_dev, 0x0, cid);
2475 he_writel_tsr8(he_dev, 0x0, cid);
2476 he_writel_tsr10(he_dev, 0x0, cid);
2477 he_writel_tsr11(he_dev, 0x0, cid);
2478 he_writel_tsr12(he_dev, 0x0, cid);
2479 he_writel_tsr13(he_dev, 0x0, cid);
2480 he_writel_tsr14(he_dev, 0x0, cid);
2481 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2482 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2483 }
2484
2485 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2486 unsigned aal;
2487
2488 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2489 &HE_VCC(vcc)->rx_waitq);
2490
2491 switch (vcc->qos.aal) {
2492 case ATM_AAL5:
2493 aal = RSR0_AAL5;
2494 break;
2495 case ATM_AAL0:
2496 aal = RSR0_RAWCELL;
2497 break;
2498 default:
2499 err = -EINVAL;
2500 goto open_failed;
2501 }
2502
2503 spin_lock_irqsave(&he_dev->global_lock, flags);
2504
2505 rsr0 = he_readl_rsr0(he_dev, cid);
2506 if (rsr0 & RSR0_OPEN_CONN) {
2507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2508
2509 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2510 err = -EBUSY;
2511 goto open_failed;
2512 }
2513
2514#ifdef USE_RBPS
2515 rsr1 = RSR1_GROUP(0);
2516 rsr4 = RSR4_GROUP(0);
2517#else /* !USE_RBPS */
2518 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2519 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2520#endif /* USE_RBPS */
2521 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2522 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2523
2524#ifdef USE_CHECKSUM_HW
2525 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2526 rsr0 |= RSR0_TCP_CKSUM;
2527#endif
2528
2529 he_writel_rsr4(he_dev, rsr4, cid);
2530 he_writel_rsr1(he_dev, rsr1, cid);
2531 /* 5.1.11 last parameter initialized should be
2532 the open/closed indication in rsr0 */
2533 he_writel_rsr0(he_dev,
2534 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2535 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2536
2537 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2538 }
2539
2540open_failed:
2541
2542 if (err) {
 2543 kfree(he_vcc);
2544 clear_bit(ATM_VF_ADDR, &vcc->flags);
2545 }
2546 else
2547 set_bit(ATM_VF_READY, &vcc->flags);
2548
2549 return err;
2550}
2551
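/*
 * he_close -- teardown mirrors open: the rx side posts
 * RSR0_CLOSE_CONN plus an RXCON_CLOSE mailbox command and sleeps on
 * rx_waitq for the adapter's completion; the tx side first lets
 * outstanding packets drain, flushes the connection, then queues a
 * zero-length EOS tpd and sleeps on tx_waitq until it returns
 * through the tbrq (see 2.1.2 below).
 */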
2552static void
2553he_close(struct atm_vcc *vcc)
2554{
2555 unsigned long flags;
2556 DECLARE_WAITQUEUE(wait, current);
2557 struct he_dev *he_dev = HE_DEV(vcc->dev);
2558 struct he_tpd *tpd;
2559 unsigned cid;
2560 struct he_vcc *he_vcc = HE_VCC(vcc);
2561#define MAX_RETRY 30
2562 int retry = 0, sleep = 1, tx_inuse;
2563
2564 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2565
2566 clear_bit(ATM_VF_READY, &vcc->flags);
2567 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2568
2569 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2570 int timeout;
2571
2572 HPRINTK("close rx cid 0x%x\n", cid);
2573
2574 /* 2.7.2.2 close receive operation */
2575
2576 /* wait for previous close (if any) to finish */
2577
2578 spin_lock_irqsave(&he_dev->global_lock, flags);
2579 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2580 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2581 udelay(250);
2582 }
2583
2584 set_current_state(TASK_UNINTERRUPTIBLE);
2585 add_wait_queue(&he_vcc->rx_waitq, &wait);
2586
2587 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2588 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2589 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2590 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2591
2592 timeout = schedule_timeout(30*HZ);
2593
2594 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2595 set_current_state(TASK_RUNNING);
2596
2597 if (timeout == 0)
2598 hprintk("close rx timeout cid 0x%x\n", cid);
2599
2600 HPRINTK("close rx cid 0x%x complete\n", cid);
2601
2602 }
2603
2604 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2605 volatile unsigned tsr4, tsr0;
2606 int timeout;
2607
2608 HPRINTK("close tx cid 0x%x\n", cid);
2609
2610 /* 2.1.2
2611 *
2612 * ... the host must first stop queueing packets to the TPDRQ
2613 * on the connection to be closed, then wait for all outstanding
2614 * packets to be transmitted and their buffers returned to the
2615 * TBRQ. When the last packet on the connection arrives in the
2616 * TBRQ, the host issues the close command to the adapter.
2617 */
2618
2619 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2620 (retry < MAX_RETRY)) {
2621 msleep(sleep);
2622 if (sleep < 250)
2623 sleep = sleep * 2;
2624
2625 ++retry;
2626 }
2627
2628 if (tx_inuse)
2629 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2630
2631 /* 2.3.1.1 generic close operations with flush */
2632
2633 spin_lock_irqsave(&he_dev->global_lock, flags);
2634 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2635 /* also clears TSR4_SESSION_ENDED */
2636
2637 switch (vcc->qos.txtp.traffic_class) {
2638 case ATM_UBR:
2639 he_writel_tsr1(he_dev,
2640 TSR1_MCR(rate_to_atmf(200000))
2641 | TSR1_PCR(0), cid);
2642 break;
2643 case ATM_CBR:
2644 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2645 break;
2646 }
2647 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2648
2649 tpd = __alloc_tpd(he_dev);
2650 if (tpd == NULL) {
2651 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2652 goto close_tx_incomplete;
2653 }
2654 tpd->status |= TPD_EOS | TPD_INT;
2655 tpd->skb = NULL;
2656 tpd->vcc = vcc;
2657 wmb();
2658
2659 set_current_state(TASK_UNINTERRUPTIBLE);
2660 add_wait_queue(&he_vcc->tx_waitq, &wait);
2661 __enqueue_tpd(he_dev, tpd, cid);
2662 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2663
2664 timeout = schedule_timeout(30*HZ);
2665
2666 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2667 set_current_state(TASK_RUNNING);
2668
2669 spin_lock_irqsave(&he_dev->global_lock, flags);
2670
2671 if (timeout == 0) {
2672 hprintk("close tx timeout cid 0x%x\n", cid);
2673 goto close_tx_incomplete;
2674 }
2675
2676 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2677 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2678 udelay(250);
2679 }
2680
2681 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2682 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2683 udelay(250);
2684 }
2685
2686close_tx_incomplete:
2687
2688 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2689 int reg = he_vcc->rc_index;
2690
2691 HPRINTK("cs_stper reg = %d\n", reg);
2692
2693 if (he_dev->cs_stper[reg].inuse == 0)
2694 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2695 else
2696 --he_dev->cs_stper[reg].inuse;
2697
2698 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2699 }
2700 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2701
2702 HPRINTK("close tx cid 0x%x complete\n", cid);
2703 }
2704
2705 kfree(he_vcc);
2706
2707 clear_bit(ATM_VF_ADDR, &vcc->flags);
2708}
2709
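/*
 * he_send -- transmit path: validate the sdu size, grab a tpd, dma-map
 * the skb data into its iovecs (or address0 without scatter/gather),
 * and enqueue it on the tpdrq; the skb is popped or freed later from
 * he_service_tbrq() once the adapter hands the tpd back.
 */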
2710static int
2711he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2712{
2713 unsigned long flags;
2714 struct he_dev *he_dev = HE_DEV(vcc->dev);
2715 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2716 struct he_tpd *tpd;
2717#ifdef USE_SCATTERGATHER
2718 int i, slot = 0;
2719#endif
2720
2721#define HE_TPD_BUFSIZE 0xffff
2722
2723 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2724
2725 if ((skb->len > HE_TPD_BUFSIZE) ||
2726 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2727 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2728 if (vcc->pop)
2729 vcc->pop(vcc, skb);
2730 else
2731 dev_kfree_skb_any(skb);
2732 atomic_inc(&vcc->stats->tx_err);
2733 return -EINVAL;
2734 }
2735
2736#ifndef USE_SCATTERGATHER
2737 if (skb_shinfo(skb)->nr_frags) {
2738 hprintk("no scatter/gather support\n");
2739 if (vcc->pop)
2740 vcc->pop(vcc, skb);
2741 else
2742 dev_kfree_skb_any(skb);
2743 atomic_inc(&vcc->stats->tx_err);
2744 return -EINVAL;
2745 }
2746#endif
2747 spin_lock_irqsave(&he_dev->global_lock, flags);
2748
2749 tpd = __alloc_tpd(he_dev);
2750 if (tpd == NULL) {
2751 if (vcc->pop)
2752 vcc->pop(vcc, skb);
2753 else
2754 dev_kfree_skb_any(skb);
2755 atomic_inc(&vcc->stats->tx_err);
2756 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2757 return -ENOMEM;
2758 }
2759
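 /* for aal5 the adapter generates the cell headers itself; raw
 aal0 cells arrive with a 4-byte header from which the pti and
 clp bits are lifted before skb_pull() strips it, leaving the
 48-byte payload */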
2760 if (vcc->qos.aal == ATM_AAL5)
2761 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2762 else {
2763 char *pti_clp = (void *) (skb->data + 3);
2764 int clp, pti;
2765
2766 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2767 clp = (*pti_clp & ATM_HDR_CLP);
2768 tpd->status |= TPD_CELLTYPE(pti);
2769 if (clp)
2770 tpd->status |= TPD_CLP;
2771
2772 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2773 }
2774
2775#ifdef USE_SCATTERGATHER
2776 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2777 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2778 tpd->iovec[slot].len = skb->len - skb->data_len;
2779 ++slot;
2780
2781 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2783
2784 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2785 tpd->vcc = vcc;
2786 tpd->skb = NULL; /* not the last fragment
 2787 so don't ->push() yet */
2788 wmb();
2789
2790 __enqueue_tpd(he_dev, tpd, cid);
2791 tpd = __alloc_tpd(he_dev);
2792 if (tpd == NULL) {
2793 if (vcc->pop)
2794 vcc->pop(vcc, skb);
2795 else
2796 dev_kfree_skb_any(skb);
2797 atomic_inc(&vcc->stats->tx_err);
2798 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2799 return -ENOMEM;
2800 }
 2801 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2802 slot = 0;
2803 }
2804
2805 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2806 (void *) page_address(frag->page) + frag->page_offset,
2807 frag->size, PCI_DMA_TODEVICE);
2808 tpd->iovec[slot].len = frag->size;
2809 ++slot;
2810
2811 }
2812
2813 tpd->iovec[slot - 1].len |= TPD_LST;
2814#else
2815 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2816 tpd->length0 = skb->len | TPD_LST;
2817#endif
2818 tpd->status |= TPD_INT;
2819
2820 tpd->vcc = vcc;
2821 tpd->skb = skb;
2822 wmb();
2823 ATM_SKB(skb)->vcc = vcc;
2824
2825 __enqueue_tpd(he_dev, tpd, cid);
2826 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2827
2828 atomic_inc(&vcc->stats->tx);
2829
2830 return 0;
2831}
2832
2833static int
2834he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2835{
2836 unsigned long flags;
2837 struct he_dev *he_dev = HE_DEV(atm_dev);
2838 struct he_ioctl_reg reg;
2839 int err = 0;
2840
2841 switch (cmd) {
2842 case HE_GET_REG:
2843 if (!capable(CAP_NET_ADMIN))
2844 return -EPERM;
2845
2846 if (copy_from_user(&reg, arg,
2847 sizeof(struct he_ioctl_reg)))
2848 return -EFAULT;
2849
2850 spin_lock_irqsave(&he_dev->global_lock, flags);
2851 switch (reg.type) {
2852 case HE_REGTYPE_PCI:
2853 reg.val = he_readl(he_dev, reg.addr);
2854 break;
2855 case HE_REGTYPE_RCM:
2856 reg.val =
2857 he_readl_rcm(he_dev, reg.addr);
2858 break;
2859 case HE_REGTYPE_TCM:
2860 reg.val =
2861 he_readl_tcm(he_dev, reg.addr);
2862 break;
2863 case HE_REGTYPE_MBOX:
2864 reg.val =
2865 he_readl_mbox(he_dev, reg.addr);
2866 break;
2867 default:
2868 err = -EINVAL;
2869 break;
2870 }
2871 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2872 if (err == 0)
2873 if (copy_to_user(arg, &reg,
2874 sizeof(struct he_ioctl_reg)))
2875 return -EFAULT;
2876 break;
2877 default:
2878#ifdef CONFIG_ATM_HE_USE_SUNI
2879 if (atm_dev->phy && atm_dev->phy->ioctl)
2880 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2881#else /* CONFIG_ATM_HE_USE_SUNI */
2882 err = -EINVAL;
2883#endif /* CONFIG_ATM_HE_USE_SUNI */
2884 break;
2885 }
2886
2887 return err;
2888}
2889
2890static void
2891he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2892{
2893 unsigned long flags;
2894 struct he_dev *he_dev = HE_DEV(atm_dev);
2895
2896 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2897
2898 spin_lock_irqsave(&he_dev->global_lock, flags);
2899 he_writel(he_dev, val, FRAMER + (addr*4));
2900 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2901 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2902}
2903
2904
2905static unsigned char
2906he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2907{
2908 unsigned long flags;
2909 struct he_dev *he_dev = HE_DEV(atm_dev);
2910 unsigned reg;
2911
2912 spin_lock_irqsave(&he_dev->global_lock, flags);
2913 reg = he_readl(he_dev, FRAMER + (addr*4));
2914 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2915
2916 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2917 return reg;
2918}
2919
2920static int
2921he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2922{
2923 unsigned long flags;
2924 struct he_dev *he_dev = HE_DEV(dev);
2925 int left, i;
2926#ifdef notdef
2927 struct he_rbrq *rbrq_tail;
2928 struct he_tpdrq *tpdrq_head;
2929 int rbpl_head, rbpl_tail;
2930#endif
2931 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2932
2933
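 /* proc convention: each call emits exactly one line, selected by
 *pos -- left counts down and the first !left-- test to fire
 returns its line */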
2934 left = *pos;
2935 if (!left--)
2936 return sprintf(page, "%s\n", version);
2937
2938 if (!left--)
2939 return sprintf(page, "%s%s\n\n",
2940 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2941
2942 if (!left--)
2943 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2944
2945 spin_lock_irqsave(&he_dev->global_lock, flags);
2946 mcc += he_readl(he_dev, MCC);
2947 oec += he_readl(he_dev, OEC);
2948 dcc += he_readl(he_dev, DCC);
2949 cec += he_readl(he_dev, CEC);
2950 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2951
2952 if (!left--)
2953 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2954 mcc, oec, dcc, cec);
2955
2956 if (!left--)
2957 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2958 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2959
2960 if (!left--)
2961 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2962 CONFIG_TPDRQ_SIZE);
2963
2964 if (!left--)
2965 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2966 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2967
2968 if (!left--)
2969 return sprintf(page, "tbrq_size = %d peak = %d\n",
2970 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2971
2972
2973#ifdef notdef
2974 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2975 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2976
2977 inuse = rbpl_head - rbpl_tail;
2978 if (inuse < 0)
2979 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2980 inuse /= sizeof(struct he_rbp);
2981
2982 if (!left--)
2983 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2984 CONFIG_RBPL_SIZE, inuse);
2985#endif
2986
2987 if (!left--)
2988 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2989
2990 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2991 if (!left--)
2992 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2993 he_dev->cs_stper[i].pcr,
2994 he_dev->cs_stper[i].inuse);
2995
2996 if (!left--)
2997 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
 2998 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2999
3000 return 0;
3001}
3002
3003/* eeprom routines -- see 4.7 */
3004
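/*
 * read_prom_byte bit-bangs the serial eeprom through HOST_CNTL: with
 * write-enable set it clocks out the READ opcode (readtab[]) and the
 * eight address bits msb-first, then with write-enable clear it
 * clocks in eight data bits via clocktab[], sampling ID_DOUT between
 * clock phases.
 */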
3005u8
3006read_prom_byte(struct he_dev *he_dev, int addr)
3007{
3008 u32 val = 0, tmp_read = 0;
3009 int i, j = 0;
3010 u8 byte_read = 0;
3011
3012 val = readl(he_dev->membase + HOST_CNTL);
3013 val &= 0xFFFFE0FF;
3014
3015 /* Turn on write enable */
3016 val |= 0x800;
3017 he_writel(he_dev, val, HOST_CNTL);
3018
3019 /* Send READ instruction */
 3020 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3021 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3022 udelay(EEPROM_DELAY);
3023 }
3024
3025 /* Next, we need to send the byte address to read from */
3026 for (i = 7; i >= 0; i--) {
3027 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3028 udelay(EEPROM_DELAY);
3029 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3030 udelay(EEPROM_DELAY);
3031 }
3032
3033 j = 0;
3034
3035 val &= 0xFFFFF7FF; /* Turn off write enable */
3036 he_writel(he_dev, val, HOST_CNTL);
3037
3038 /* Now, we can read data from the EEPROM by clocking it in */
3039 for (i = 7; i >= 0; i--) {
3040 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3041 udelay(EEPROM_DELAY);
3042 tmp_read = he_readl(he_dev, HOST_CNTL);
3043 byte_read |= (unsigned char)
3044 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3045 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3046 udelay(EEPROM_DELAY);
3047 }
3048
3049 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3050 udelay(EEPROM_DELAY);
3051
3052 return byte_read;
3053}
3054
3055MODULE_LICENSE("GPL");
3056MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3057MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3058module_param(disable64, bool, 0);
3059MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3060module_param(nvpibits, short, 0);
 3061MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
 3062module_param(nvcibits, short, 0);
 3063MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3064module_param(rx_skb_reserve, short, 0);
3065MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3066module_param(irq_coalesce, bool, 0);
3067MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3068module_param(sdh, bool, 0);
3069MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3070
3071static struct pci_device_id he_pci_tbl[] = {
3072 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3073 0, 0, 0 },
3074 { 0, }
3075};
3076
3077MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3078
3079static struct pci_driver he_driver = {
3080 .name = "he",
3081 .probe = he_init_one,
3082 .remove = __devexit_p(he_remove_one),
3083 .id_table = he_pci_tbl,
3084};
3085
3086static int __init he_init(void)
3087{
3088 return pci_register_driver(&he_driver);
3089}
3090
3091static void __exit he_cleanup(void)
3092{
3093 pci_unregister_driver(&he_driver);
3094}
3095
3096module_init(he_init);
3097module_exit(he_cleanup);