/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *	rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *	watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define fMP_DEST_MULTI		0x00000001
#define fMP_DEST_BROAD		0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
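
/*
 * Editor's illustrative sketch, not part of the original driver: given the
 * word 1 layout documented above, the fields of a packet status descriptor
 * could be unpacked as below. The helper names are hypothetical.
 */
static inline u32 psd_example_length(const struct pkt_stat_desc *psd)
{
	return psd->word1 & 0xFFFF;		/* bits 0-15: length in bytes */
}

static inline u32 psd_example_buffer_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 psd_example_ring_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}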

/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};
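
/*
 * Editor's illustrative sketch, not part of the original driver: per the
 * word 0 layout documented above, the free buffer ring offsets and wrap
 * flags could be unpacked as below. The helper names are hypothetical.
 */
static inline u32 rx_status_example_fbr1_offset(const struct rx_status_block *s)
{
	return s->word0 & 0x3FF;		/* bits 0-9: FBR1 offset */
}

static inline bool rx_status_example_fbr1_wrap(const struct rx_status_block *s)
{
	return s->word0 & (1 << 10);		/* bit 10: FBR1 wrap flag */
}

static inline u32 rx_status_example_fbr0_offset(const struct rx_status_block *s)
{
	return (s->word0 >> 16) & 0x3FF;	/* bits 16-25: FBR0 offset */
}

static inline bool rx_status_example_fbr0_wrap(const struct rx_status_block *s)
{
	return s->word0 & (1 << 26);		/* bit 26: FBR0 wrap flag */
}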

/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

/*
 * struct rx_ring is the structure representing the adapter's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};
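
/*
 * Editor's illustrative sketch, not part of the original driver: using the
 * word 2 / word 3 layout documented above, a descriptor for a plain
 * (non-VLAN) single-fragment frame could be filled in as below. The bit
 * values follow the documented positions; the helper name is hypothetical.
 */
static inline void tx_desc_example_fill(struct tx_desc *desc,
					dma_addr_t buf, u32 len)
{
	desc->addr_hi = upper_32_bits(buf);
	desc->addr_lo = lower_32_bits(buf);
	desc->len_vlan = len & 0xFFFF;	/* word 2 bits 0-15: length, no VLAN */
	/* word 3: bit 0 last in sequence, bit 1 first, bit 2 IRQ when sent */
	desc->flags = (1 << 0) | (1 << 1) | (1 << 2);
}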

/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/*
 * Do not change these values: if changed, then change them also in the
 * respective TXdma and RXdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32 tx_underflows;

	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics. */
	u32 rx_overflows;

	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 synchronous_iterations;
	u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* Variable that puts the phy into coma mode when booting up with no
	 * cable plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, otherwise a negative errno.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0. Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bits 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both to
		 * equal 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1: this is so we do a blind write for the load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, otherwise a negative errno.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
			     &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card running in an
	 * LG M1 Express Dual. I also tried an msleep before this function,
	 * because I thought there could be some timing conditions, but it
	 * didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[1]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
		csr |= 0x1800;

	csr |= 0x0400;		/* FBR0 enable */
	if (adapter->rx_ring.fbr[0]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[0]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
		csr |= 0x0300;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & 0x00020000) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & 0x00020000) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
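
/*
 * Editor's worked example, assuming INDEX10() masks bits 9:0 and
 * ET_DMA10_WRAP is bit 10, per the DMA10 format mentioned above: these
 * helpers advance the low-order index modulo the ring size while
 * preserving the wrap flag, which callers toggle separately.
 *
 *	u32 v = ET_DMA10_WRAP | 0x3FF;	(wrap set, offset 1023)
 *	add_10bit(&v, 1);		(v is now ET_DMA10_WRAP | 0x000)
 */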

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next let's configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next let's configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next let's configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next let's configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		   adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;
		/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	   full duplex off */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1 << 26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	/* Enable txmac */
	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified (and "ALL" is not
	 * specified), then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}
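
/*
 * Editor's illustrative sketch, not part of the original driver: the loop
 * above reduces each multicast address to a 7-bit hash bucket taken from
 * bits 29:23 of the Ethernet CRC. Buckets 0-31 select a bit in multi_hash1,
 * 32-63 in multi_hash2, 64-95 in multi_hash3 and 96-127 in multi_hash4.
 * The helper name is hypothetical.
 */
static inline u32 et131x_example_hash_bucket(const u8 *mac)
{
	return (ether_crc(ETH_ALEN, mac) & 0x3F800000) >> 23;
}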

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both addresses
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
		  adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
		  adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default value of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3: Receive code error. One or more nibbles were signaled as
	 *	  errors during the reception of the packet. Clear this
	 *	  bit in Gigabit, set it in 100Mbit. This was derived
	 *	  experimentally at UNH.
	 * bit 4: Receive CRC error. The packet's CRC did not match the
	 *	  internally generated CRC.
	 * bit 5: Receive length check error. Indicates that frame length
	 *	  field value in the packet does not match the actual data
	 *	  byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Still used from _mac for BIT_READ */
static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
				      u16 action, u16 regnum, u16 bitnum,
				      u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 10, &remote_pause);
		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 11,
				&remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
			       remote_async_pause == TRUEPHY_SET_BIT) */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}
1763
1764 /**
1765 * et131x_configure_global_regs - configure JAGCore global regs
1766 * @adapter: pointer to our adapter structure
1767 *
1768 * Used to configure the global registers on the JAGCore
1769 */
1770 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1771 {
1772 struct global_regs __iomem *regs = &adapter->regs->global;
1773
1774 writel(0, &regs->rxq_start_addr);
1775 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1776
1777 if (adapter->registry_jumbo_packet < 2048) {
1778 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1779 * block of RAM that the driver can split between Tx
1780 * and Rx as it desires. Our default is to split it
1781 * 50/50:
1782 */
1783 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1784 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1785 } else if (adapter->registry_jumbo_packet < 8192) {
1786 /* For jumbo packets > 2k but < 8k, split 50-50. */
1787 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1788 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1789 } else {
1790 /* 9216 is the only packet size greater than 8k that
1791 * is available. The Tx buffer has to be big enough
1792 * for one whole packet on the Tx side. We'll make
1793 * the Tx 9408, and give the rest to Rx
1794 */
1795 writel(0x01b3, &regs->rxq_end_addr);
1796 writel(0x01b4, &regs->txq_start_addr);
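/* A sanity check of the 9408 figure (a sketch, assuming INTERNAL_MEM_SIZE
 * is 0x400 and queue addresses count 16-byte words): the Tx queue spans
 * 0x1b4..0x3ff, i.e. 0x24c words, and 0x24c * 16 = 9408 bytes.
 */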
1797 }
1798
1799 /* Initialize the loopback register. Disable all loopbacks. */
1800 writel(0, &regs->loopback);
1801
1802 /* MSI Register */
1803 writel(0, &regs->msi_config);
1804
1805 /* By default, disable the watchdog timer. It will be enabled when
1806 * a packet is queued.
1807 */
1808 writel(0, &regs->watchdog_timer);
1809 }
1810
1811 /**
1812 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1813 * @adapter: pointer to our adapter structure
1814 */
1815 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1816 {
1817 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1818 struct rx_ring *rx_local = &adapter->rx_ring;
1819 struct fbr_desc *fbr_entry;
1820 u32 entry;
1821 u32 psr_num_des;
1822 unsigned long flags;
1823 u8 id;
1824
1825 /* Halt RXDMA to perform the reconfigure. */
1826 et131x_rx_dma_disable(adapter);
1827
1828 /* Load the completion writeback physical address */
1829 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1830 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1831
1832 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1833
1834 /* Set the address and parameters of the packet status ring into the
1835 * 1310's registers
1836 */
1837 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1838 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1839 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1840 writel(0, &rx_dma->psr_full_offset);
1841
1842 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1843 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1844 &rx_dma->psr_min_des);
1845
1846 spin_lock_irqsave(&adapter->rcv_lock, flags);
1847
1848 /* These local variables track the PSR in the adapter structure */
1849 rx_local->local_psr_full = 0;
1850
1851 for (id = 0; id < NUM_FBRS; id++) {
1852 u32 __iomem *num_des;
1853 u32 __iomem *full_offset;
1854 u32 __iomem *min_des;
1855 u32 __iomem *base_hi;
1856 u32 __iomem *base_lo;
1857
1858 if (id == 0) {
1859 num_des = &rx_dma->fbr0_num_des;
1860 full_offset = &rx_dma->fbr0_full_offset;
1861 min_des = &rx_dma->fbr0_min_des;
1862 base_hi = &rx_dma->fbr0_base_hi;
1863 base_lo = &rx_dma->fbr0_base_lo;
1864 } else {
1865 num_des = &rx_dma->fbr1_num_des;
1866 full_offset = &rx_dma->fbr1_full_offset;
1867 min_des = &rx_dma->fbr1_min_des;
1868 base_hi = &rx_dma->fbr1_base_hi;
1869 base_lo = &rx_dma->fbr1_base_lo;
1870 }
1871
1872 /* Now's the best time to initialize FBR contents */
1873 fbr_entry =
1874 (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
1875 for (entry = 0;
1876 entry < rx_local->fbr[id]->num_entries; entry++) {
1877 fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
1878 fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
1879 fbr_entry->word2 = entry;
1880 fbr_entry++;
1881 }
1882
1883 /* Set the address and parameters of Free buffer ring 1 and 0
1884 * into the 1310's registers
1885 */
1886 writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
1887 base_hi);
1888 writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
1889 base_lo);
1890 writel(rx_local->fbr[id]->num_entries - 1, num_des);
1891 writel(ET_DMA10_WRAP, full_offset);
1892
1893 /* This variable tracks the free buffer ring 1 full position,
1894 * so it has to match the above.
1895 */
1896 rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
1897 writel(((rx_local->fbr[id]->num_entries *
1898 LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1899 min_des);
1900 }
1901
1902 /* Program the number of packets we will receive before generating an
1903 * interrupt.
1904 * For version B silicon, this value gets updated once autoneg is
1905 * complete.
1906 */
1907 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1908
1909 /* The "time_done" is not working correctly to coalesce interrupts
1910 * after a given time period, but rather is giving us an interrupt
1911 * regardless of whether we have received packets.
1912 * This value gets updated once autoneg is complete.
1913 */
1914 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1915
1916 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1917 }
1918
1919 /**
1920 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1921 * @adapter: pointer to our private adapter structure
1922 *
1923 * Configure the transmit engine with the ring buffers we have created
1924 * and prepare it for use.
1925 */
1926 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1927 {
1928 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1929
1930 /* Load the hardware with the start of the transmit descriptor ring. */
1931 writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
1932 &txdma->pr_base_hi);
1933 writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
1934 &txdma->pr_base_lo);
1935
1936 /* Initialise the transmit DMA engine */
1937 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1938
1939 /* Load the completion writeback physical address */
1940 writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
1941 &txdma->dma_wb_base_hi);
1942 writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
1943 &txdma->dma_wb_base_lo);
1944
1945 *adapter->tx_ring.tx_status = 0;
1946
1947 writel(0, &txdma->service_request);
1948 adapter->tx_ring.send_idx = 0;
1949 }
1950
1951 /**
1952 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
1953 * @adapter: pointer to our private adapter structure
1956 */
1957 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1958 {
1959 /* Configure the JAGCore */
1960 et131x_configure_global_regs(adapter);
1961
1962 et1310_config_mac_regs1(adapter);
1963
1964 /* Configure the MMC registers */
1965 /* All we need to do is initialize the Memory Control Register */
1966 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1967
1968 et1310_config_rxmac_regs(adapter);
1969 et1310_config_txmac_regs(adapter);
1970
1971 et131x_config_rx_dma_regs(adapter);
1972 et131x_config_tx_dma_regs(adapter);
1973
1974 et1310_config_macstat_regs(adapter);
1975
1976 et1310_phy_power_down(adapter, 0);
1977 et131x_xcvr_init(adapter);
1978 }
1979
1980 /**
1981 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
1982 * @adapter: pointer to our private adapter structure
1983 */
1984 static void et131x_soft_reset(struct et131x_adapter *adapter)
1985 {
1986 /* Disable MAC Core */
1987 writel(0xc00f0000, &adapter->regs->mac.cfg1);
1988
1989 /* Set everything to a reset value */
1990 writel(0x7F, &adapter->regs->global.sw_reset);
1991 writel(0x000f0000, &adapter->regs->mac.cfg1);
1992 writel(0x00000000, &adapter->regs->mac.cfg1);
1993 }
1994
1995 /**
1996 * et131x_enable_interrupts - enable interrupts
1997 * @adapter: et131x device
1998 *
1999 * Enable the appropriate interrupts on the ET131x according to our
2000 * configuration
2001 */
2002 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2003 {
2004 u32 mask;
2005
2006 /* Enable all global interrupts */
2007 if (adapter->flowcontrol == FLOW_TXONLY ||
2008 adapter->flowcontrol == FLOW_BOTH)
2009 mask = INT_MASK_ENABLE;
2010 else
2011 mask = INT_MASK_ENABLE_NO_FLOW;
2012
2013 writel(mask, &adapter->regs->global.int_mask);
2014 }
2015
2016 /**
2017 * et131x_disable_interrupts - disable interrupts
2018 * @adapter: et131x device
2019 *
2020 * Block all interrupts from the et131x device at the device itself
2021 */
2022 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2023 {
2024 /* Disable all global interrupts */
2025 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2026 }
2027
2028 /**
2029 * et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
2030 * @adapter: pointer to our adapter structure
2031 */
2032 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2033 {
2034 /* Setup the transmit dma configuration register */
2035 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2036 &adapter->regs->txdma.csr);
2037 }
2038
2039 /**
2040 * et131x_enable_txrx - Enable tx/rx queues
2041 * @netdev: device to be enabled
2042 */
2043 static void et131x_enable_txrx(struct net_device *netdev)
2044 {
2045 struct et131x_adapter *adapter = netdev_priv(netdev);
2046
2047 /* Enable the Tx and Rx DMA engines (if not already enabled) */
2048 et131x_rx_dma_enable(adapter);
2049 et131x_tx_dma_enable(adapter);
2050
2051 /* Enable device interrupts */
2052 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2053 et131x_enable_interrupts(adapter);
2054
2055 /* We're ready to move some data, so start the queue */
2056 netif_start_queue(netdev);
2057 }
2058
2059 /**
2060 * et131x_disable_txrx - Disable tx/rx queues
2061 * @netdev: device to be disabled
2062 */
2063 static void et131x_disable_txrx(struct net_device *netdev)
2064 {
2065 struct et131x_adapter *adapter = netdev_priv(netdev);
2066
2067 /* First thing is to stop the queue */
2068 netif_stop_queue(netdev);
2069
2070 /* Stop the Tx and Rx DMA engines */
2071 et131x_rx_dma_disable(adapter);
2072 et131x_tx_dma_disable(adapter);
2073
2074 /* Disable device interrupts */
2075 et131x_disable_interrupts(adapter);
2076 }
2077
2078 /**
2079 * et131x_init_send - Initialize send data structures
2080 * @adapter: pointer to our private adapter structure
2081 */
2082 static void et131x_init_send(struct et131x_adapter *adapter)
2083 {
2084 struct tcb *tcb;
2085 u32 ct;
2086 struct tx_ring *tx_ring;
2087
2088 /* Setup some convenience pointers */
2089 tx_ring = &adapter->tx_ring;
2090 tcb = adapter->tx_ring.tcb_ring;
2091
2092 tx_ring->tcb_qhead = tcb;
2093
2094 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2095
2096 /* Go through and set up each TCB */
2097 for (ct = 0; ct < NUM_TCB; ct++, tcb++)
2098 /* Set the link pointer in HW TCB to the next TCB in the
2099 * chain
2100 */
2101 tcb->next = tcb + 1;
2102
2103 /* Set the tail pointer */
2104 tcb--;
2105 tx_ring->tcb_qtail = tcb;
2106 tcb->next = NULL;
2107 /* Curr send queue should now be empty */
2108 tx_ring->send_head = NULL;
2109 tx_ring->send_tail = NULL;
2110 }
2111
2112 /**
2113 * et1310_enable_phy_coma - called when network cable is unplugged
2114 * @adapter: pointer to our adapter structure
2115 *
2116 * The driver receives a phy status change interrupt while in D0 and checks
2117 * that phy_status is down.
2118 *
2119 * -- gate off JAGCore;
2120 * -- set gigE PHY in Coma mode
2121 * -- wake on phy_interrupt; Perform software reset JAGCore,
2122 * re-initialize jagcore and gigE PHY
2123 *
2124 * Add D0-ASPM-PhyLinkDown Support:
2125 * -- while in D0, when there is a phy_interrupt indicating phy link
2126 * down status, call the MPSetPhyComa routine to enter this active
2127 * state power saving mode
2128 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2129 * indicating linkup status, call the MPDisablePhyComa routine to
2130 * restore JAGCore and gigE PHY
2131 */
2132 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2133 {
2134 unsigned long flags;
2135 u32 pmcsr;
2136
2137 pmcsr = readl(&adapter->regs->global.pm_csr);
2138
2139 /* Save the GbE PHY speed and duplex modes. Need to restore this
2140 * when cable is plugged back in
2141 */
2142 /*
2143 * TODO - when PM is re-enabled, check if we need to
2144 * perform a similar task as this -
2145 * adapter->pdown_speed = adapter->ai_force_speed;
2146 * adapter->pdown_duplex = adapter->ai_force_duplex;
2147 */
2148
2149 /* Stop sending packets. */
2150 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2151 adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2152 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2153
2154 /* Wait for outstanding Receive packets */
2155
2156 et131x_disable_txrx(adapter->netdev);
2157
2158 /* Gate off JAGCore 3 clock domains */
2159 pmcsr &= ~ET_PMCSR_INIT;
2160 writel(pmcsr, &adapter->regs->global.pm_csr);
2161
2162 /* Program gigE PHY in to Coma mode */
2163 pmcsr |= ET_PM_PHY_SW_COMA;
2164 writel(pmcsr, &adapter->regs->global.pm_csr);
2165 }
2166
2167 /**
2168 * et1310_disable_phy_coma - Disable the Phy Coma Mode
2169 * @adapter: pointer to our adapter structure
2170 */
2171 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2172 {
2173 u32 pmcsr;
2174
2175 pmcsr = readl(&adapter->regs->global.pm_csr);
2176
2177 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
2178 pmcsr |= ET_PMCSR_INIT;
2179 pmcsr &= ~ET_PM_PHY_SW_COMA;
2180 writel(pmcsr, &adapter->regs->global.pm_csr);
2181
2182 /* Restore the GbE PHY speed and duplex modes;
2183 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2184 */
2185 /* TODO - when PM is re-enabled, check if we need to
2186 * perform a similar task as this -
2187 * adapter->ai_force_speed = adapter->pdown_speed;
2188 * adapter->ai_force_duplex = adapter->pdown_duplex;
2189 */
2190
2191 /* Re-initialize the send structures */
2192 et131x_init_send(adapter);
2193
2194 /* Bring the device back to the state it was during init prior to
2195 * autonegotiation being complete. This way, when we get the auto-neg
2196 * complete interrupt, we can complete init by calling ConfigMacREGS2.
2197 */
2198 et131x_soft_reset(adapter);
2199
2200 /* setup et1310 as per the documentation ?? */
2201 et131x_adapter_setup(adapter);
2202
2203 /* Allow Tx to restart */
2204 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2205
2206 et131x_enable_txrx(adapter->netdev);
2207 }
2208
2209 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2210 {
2211 u32 tmp_free_buff_ring = *free_buff_ring;
2212 tmp_free_buff_ring++;
2213 /* This works for all cases where limit < 1024. The 1023 case
2214 works because 1023++ is 1024 which means the if condition is not
2215 taken but the carry of the bit into the wrap bit toggles the wrap
2216 value correctly */
2217 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2218 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2219 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2220 }
2221 /* For the 1023 case */
2222 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2223 *free_buff_ring = tmp_free_buff_ring;
2224 return tmp_free_buff_ring;
2225 }
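/* Worked example (a sketch, assuming ET_DMA10_MASK == 0x3ff and
 * ET_DMA10_WRAP == 0x400): with limit == 511, index 0x1ff increments to
 * 0x200, which exceeds the limit, so the low bits are cleared and the
 * wrap bit toggles, yielding 0x400. With limit == 1023, index 0x3ff
 * increments to 0x400; the 'if' is not taken, but the carry out of bit 9
 * has already toggled the wrap bit, exactly as the comment above says.
 */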
2226
2227 /**
2228 * et131x_rx_dma_memory_alloc - allocate Rx DMA memory
2229 * @adapter: pointer to our private adapter structure
2230 *
2231 * Returns 0 on success and errno on failure (as defined in errno.h)
2232 *
2233 * Allocates both free buffer rings (0 and 1) and the Packet Status Ring.
2235 */
2236 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2237 {
2238 u8 id;
2239 u32 i, j;
2240 u32 bufsize;
2241 u32 pktstat_ringsize;
2242 u32 fbr_chunksize;
2243 struct rx_ring *rx_ring;
2244
2245 /* Setup some convenience pointers */
2246 rx_ring = &adapter->rx_ring;
2247
2248 /* Alloc memory for the lookup table */
2249 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2250 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
if (!rx_ring->fbr[0] || !rx_ring->fbr[1]) /* dereferenced below when sizing */
return -ENOMEM;
2251
2252 /* The first thing we will do is configure the sizes of the buffer
2253 * rings. These will change based on jumbo packet support. Larger
2254 * jumbo packets increases the size of each entry in FBR0, and the
2255 * number of entries in FBR0, while at the same time decreasing the
2256 * number of entries in FBR1.
2257 *
2258 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
2259 * entries are huge in order to accommodate a "jumbo" frame, then it
2260 * will have fewer entries. Conversely, FBR0 will then be relied upon
2261 * to carry more "normal" frames, so its entry size also increases
2262 * and the number of entries goes up too (since it now carries
2263 * "small" + "regular" packets).
2264 *
2265 * In this scheme, we try to maintain 512 entries between the two
2266 * rings. Also, FBR1's total footprint remains constant - when its entry
2267 * size doubles, the number of entries halves. FBR0 grows in size, however.
2268 */
2269
2270 if (adapter->registry_jumbo_packet < 2048) {
2271 rx_ring->fbr[0]->buffsize = 256;
2272 rx_ring->fbr[0]->num_entries = 512;
2273 rx_ring->fbr[1]->buffsize = 2048;
2274 rx_ring->fbr[1]->num_entries = 512;
2275 } else if (adapter->registry_jumbo_packet < 4096) {
2276 rx_ring->fbr[0]->buffsize = 512;
2277 rx_ring->fbr[0]->num_entries = 1024;
2278 rx_ring->fbr[1]->buffsize = 4096;
2279 rx_ring->fbr[1]->num_entries = 512;
2280 } else {
2281 rx_ring->fbr[0]->buffsize = 1024;
2282 rx_ring->fbr[0]->num_entries = 768;
2283 rx_ring->fbr[1]->buffsize = 16384;
2284 rx_ring->fbr[1]->num_entries = 128;
2285 }
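/* Resulting layout by registry_jumbo_packet (< 2048 / < 4096 / else):
 *   FBR0:  256B x 512 /  512B x 1024 /  1024B x 768
 *   FBR1: 2048B x 512 / 4096B x 512  / 16384B x 128
 */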
2286
2287 adapter->rx_ring.psr_num_entries =
2288 adapter->rx_ring.fbr[0]->num_entries +
2289 adapter->rx_ring.fbr[1]->num_entries;
2290
2291 for (id = 0; id < NUM_FBRS; id++) {
2292 /* Allocate an area of memory for Free Buffer Ring */
2293 bufsize =
2294 (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries);
2295 rx_ring->fbr[id]->ring_virtaddr =
2296 dma_alloc_coherent(&adapter->pdev->dev,
2297 bufsize,
2298 &rx_ring->fbr[id]->ring_physaddr,
2299 GFP_KERNEL);
2300 if (!rx_ring->fbr[id]->ring_virtaddr) {
2301 dev_err(&adapter->pdev->dev,
2302 "Cannot alloc memory for Free Buffer Ring %d\n", id);
2303 return -ENOMEM;
2304 }
2305 }
2306
2307 for (id = 0; id < NUM_FBRS; id++) {
2308 fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize);
2309
2310 for (i = 0;
2311 i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) {
2312 dma_addr_t fbr_tmp_physaddr;
2313
2314 rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent(
2315 &adapter->pdev->dev, fbr_chunksize,
2316 &rx_ring->fbr[id]->mem_physaddrs[i],
2317 GFP_KERNEL);
2318
2319 if (!rx_ring->fbr[id]->mem_virtaddrs[i]) {
2320 dev_err(&adapter->pdev->dev,
2321 "Could not alloc memory\n");
2322 return -ENOMEM;
2323 }
2324
2325 /* See NOTE in "Save Physical Address" comment above */
2326 fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i];
2327
2328 for (j = 0; j < FBR_CHUNKS; j++) {
2329 u32 index = (i * FBR_CHUNKS) + j;
2330
2331 /* Save the Virtual address of this index for
2332 * quick access later
2333 */
2334 rx_ring->fbr[id]->virt[index] =
2335 (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] +
2336 (j * rx_ring->fbr[id]->buffsize);
2337
2338 /* now store the physical address in the
2339 * descriptor so the device can access it
2340 */
2341 rx_ring->fbr[id]->bus_high[index] =
2342 upper_32_bits(fbr_tmp_physaddr);
2343 rx_ring->fbr[id]->bus_low[index] =
2344 lower_32_bits(fbr_tmp_physaddr);
2345
2346 fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize;
2347 }
2348 }
2349 }
2350
2351 /* Allocate an area of memory for FIFO of Packet Status ring entries */
2352 pktstat_ringsize =
2353 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2354
2355 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2356 pktstat_ringsize,
2357 &rx_ring->ps_ring_physaddr,
2358 GFP_KERNEL);
2359
2360 if (!rx_ring->ps_ring_virtaddr) {
2361 dev_err(&adapter->pdev->dev,
2362 "Cannot alloc memory for Packet Status Ring\n");
2363 return -ENOMEM;
2364 }
2365 pr_info("Packet Status Ring %llx\n",
2366 (unsigned long long) rx_ring->ps_ring_physaddr);
2367
2368 /*
2369 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2370 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2371 * are ever returned, make sure the high part is retrieved here before
2372 * storing the adjusted address.
2373 */
2374
2375 /* Allocate an area of memory for writeback of status information */
2376 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2377 sizeof(struct rx_status_block),
2378 &rx_ring->rx_status_bus,
2379 GFP_KERNEL);
2380 if (!rx_ring->rx_status_block) {
2381 dev_err(&adapter->pdev->dev,
2382 "Cannot alloc memory for Status Block\n");
2383 return -ENOMEM;
2384 }
2385 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2386 pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
2387
2388 /* The RFDs are going to be put on lists later on, so initialize the
2389 * lists now.
2390 */
2391 INIT_LIST_HEAD(&rx_ring->recv_list);
2392 return 0;
2393 }
2394
2395 /**
2396 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2397 * @adapter: pointer to our private adapter structure
2398 */
2399 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2400 {
2401 u8 id;
2402 u32 index;
2403 u32 bufsize;
2404 u32 pktstat_ringsize;
2405 struct rfd *rfd;
2406 struct rx_ring *rx_ring;
2407
2408 /* Setup some convenience pointers */
2409 rx_ring = &adapter->rx_ring;
2410
2411 /* Free RFDs and associated packet descriptors */
2412 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2413
2414 while (!list_empty(&rx_ring->recv_list)) {
2415 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2416 struct rfd, list_node);
2417
2418 list_del(&rfd->list_node);
2419 rfd->skb = NULL;
2420 kfree(rfd);
2421 }
2422
2423 /* Free Free Buffer Rings */
2424 for (id = 0; id < NUM_FBRS; id++) {
2425 if (!rx_ring->fbr[id]->ring_virtaddr)
2426 continue;
2427
2428 /* First the packet memory */
2429 for (index = 0;
2430 index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS);
2431 index++) {
2432 if (rx_ring->fbr[id]->mem_virtaddrs[index]) {
2433 bufsize =
2434 rx_ring->fbr[id]->buffsize * FBR_CHUNKS;
2435
2436 dma_free_coherent(&adapter->pdev->dev,
2437 bufsize,
2438 rx_ring->fbr[id]->mem_virtaddrs[index],
2439 rx_ring->fbr[id]->mem_physaddrs[index]);
2440
2441 rx_ring->fbr[id]->mem_virtaddrs[index] = NULL;
2442 }
2443 }
2444
2445 bufsize =
2446 sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries;
2447
2448 dma_free_coherent(&adapter->pdev->dev, bufsize,
2449 rx_ring->fbr[id]->ring_virtaddr,
2450 rx_ring->fbr[id]->ring_physaddr);
2451
2452 rx_ring->fbr[id]->ring_virtaddr = NULL;
2453 }
2454
2455 /* Free Packet Status Ring */
2456 if (rx_ring->ps_ring_virtaddr) {
2457 pktstat_ringsize = sizeof(struct pkt_stat_desc) *
2458 adapter->rx_ring.psr_num_entries;
2459
2460 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2461 rx_ring->ps_ring_virtaddr,
2462 rx_ring->ps_ring_physaddr);
2463
2464 rx_ring->ps_ring_virtaddr = NULL;
2465 }
2466
2467 /* Free area of memory for the writeback of status information */
2468 if (rx_ring->rx_status_block) {
2469 dma_free_coherent(&adapter->pdev->dev,
2470 sizeof(struct rx_status_block),
2471 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2472 rx_ring->rx_status_block = NULL;
2473 }
2474
2475 /* Free the FBR Lookup Table */
2476 kfree(rx_ring->fbr[0]);
2477 kfree(rx_ring->fbr[1]);
2478
2479 /* Reset Counters */
2480 rx_ring->num_ready_recv = 0;
2481 }
2482
2483 /**
2484 * et131x_init_recv - Initialize receive data structures.
2485 * @adapter: pointer to our private adapter structure
2486 *
2487 * Returns 0 on success and errno on failure (as defined in errno.h)
2488 */
2489 static int et131x_init_recv(struct et131x_adapter *adapter)
2490 {
2491 struct rfd *rfd;
2492 u32 rfdct;
2493 u32 numrfd = 0;
2494 struct rx_ring *rx_ring;
2495
2496 /* Setup some convenience pointers */
2497 rx_ring = &adapter->rx_ring;
2498
2499 /* Setup each RFD */
2500 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2501 rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
2502
2503 if (!rfd) {
2504 dev_err(&adapter->pdev->dev, "Couldn't alloc RFD\n");
2505 return -ENOMEM;
2506 }
2507
2508 rfd->skb = NULL;
2509
2510 /* Add this RFD to the recv_list */
2511 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2512
2513 /* Increment both the available RFD's, and the total RFD's. */
2514 rx_ring->num_ready_recv++;
2515 numrfd++;
2516 }
2517
2518 return 0;
2519 }
2520
2521 /**
2522 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2523 * @adapter: pointer to our adapter structure
2524 */
2525 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2526 {
2527 struct phy_device *phydev = adapter->phydev;
2528
2529 if (!phydev)
2530 return;
2531
2532 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2533 * Mbits/s line rates, and we do not enable any RxDMA interrupt
2534 * coalescing: interrupt after every packet, with the timer disabled. */
2535 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2536 writel(0, &adapter->regs->rxdma.max_pkt_time);
2537 writel(1, &adapter->regs->rxdma.num_pkt_done);
2538 }
2539 }
2540
2541 /**
2542 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2543 * @adapter: pointer to our adapter
2544 * @rfd: pointer to the RFD
2545 */
2546 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2547 {
2548 struct rx_ring *rx_local = &adapter->rx_ring;
2549 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2550 u16 buff_index = rfd->bufferindex;
2551 u8 ring_index = rfd->ringindex;
2552 unsigned long flags;
2553
2554 /* We don't use any of the OOB data besides status. Otherwise, we
2555 * need to clean up OOB data
2556 */
2557 if (buff_index < rx_local->fbr[ring_index]->num_entries) {
2558 u32 __iomem *offset;
2559 struct fbr_desc *next;
2560
2561 spin_lock_irqsave(&adapter->fbr_lock, flags);
2562
2563 if (ring_index == 0)
2564 offset = &rx_dma->fbr0_full_offset;
2565 else
2566 offset = &rx_dma->fbr1_full_offset;
2567
2568 next = (struct fbr_desc *)
2569 (rx_local->fbr[ring_index]->ring_virtaddr) +
2570 INDEX10(rx_local->fbr[ring_index]->local_full);
2571
2572 /* Handle the Free Buffer Ring advancement here. Write
2573 * the PA / Buffer Index for the returned buffer into
2574 * the oldest (next to be freed) FBR entry
2575 */
2576 next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index];
2577 next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index];
2578 next->word2 = buff_index;
2579
2580 writel(bump_free_buff_ring(
2581 &rx_local->fbr[ring_index]->local_full,
2582 rx_local->fbr[ring_index]->num_entries - 1),
2583 offset);
2584
2585 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2586 } else {
2587 dev_err(&adapter->pdev->dev,
2588 "%s illegal Buffer Index returned\n", __func__);
2589 }
2590
2591 /* The processing on this RFD is done, so put it back on the tail of
2592 * our list
2593 */
2594 spin_lock_irqsave(&adapter->rcv_lock, flags);
2595 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2596 rx_local->num_ready_recv++;
2597 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2598
2599 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2600 }
2601
2602 /**
2603 * nic_rx_pkts - Checks the hardware for available packets
2604 * @adapter: pointer to our adapter
2605 *
2606 * Returns rfd, a pointer to our MPRFD.
2607 *
2608 * Checks the hardware for available packets, using the completion ring.
2609 * If packets are available, it gets an RFD from the recv_list, attaches
2610 * the packet to it, puts the RFD in the RecvPendList, and also returns
2611 * the pointer to the RFD.
2612 */
2613 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2614 {
2615 struct rx_ring *rx_local = &adapter->rx_ring;
2616 struct rx_status_block *status;
2617 struct pkt_stat_desc *psr;
2618 struct rfd *rfd;
2619 u32 i;
2620 u8 *buf;
2621 unsigned long flags;
2622 struct list_head *element;
2623 u8 ring_index;
2624 u16 buff_index;
2625 u32 len;
2626 u32 word0;
2627 u32 word1;
2628 struct sk_buff *skb;
2629
2630 /* RX Status block is written by the DMA engine prior to every
2631 * interrupt. It contains the next to be used entry in the Packet
2632 * Status Ring, and also the two Free Buffer rings.
2633 */
2634 status = rx_local->rx_status_block;
2635 word1 = status->word1 >> 16; /* Get the useful bits */
2636
2637 /* Check the PSR and wrap bits do not match */
2638 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2639 return NULL; /* Looks like this ring is not updated yet */
2640
2641 /* The packet status ring indicates that data is available. */
2642 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2643 (rx_local->local_psr_full & 0xFFF);
2644
2645 /* Grab any information that is required once the PSR is advanced,
2646 * since we can no longer rely on the memory being accurate
2647 */
2648 len = psr->word1 & 0xFFFF;
2649 ring_index = (psr->word1 >> 26) & 0x03;
2650 buff_index = (psr->word1 >> 16) & 0x3FF;
2651 word0 = psr->word0;
2652
2653 /* Indicate that we have used this PSR entry. */
2654 /* FIXME wrap 12 */
2655 add_12bit(&rx_local->local_psr_full, 1);
2656 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2658 /* Clear psr full and toggle the wrap bit */
2659 rx_local->local_psr_full &= ~0xFFF;
2660 rx_local->local_psr_full ^= 0x1000;
2661 }
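/* local_psr_full uses the same index-plus-wrap-bit encoding as the free
 * buffer rings, only 12 bits wide: bits 0-11 index the PSR and bit 12 is
 * the wrap marker that is compared against the hardware's word1 above.
 */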
2662
2663 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2664
2665 if (ring_index > 1 ||
2666 buff_index > rx_local->fbr[ring_index]->num_entries - 1) {
2667 /* Illegal buffer or ring index cannot be used by S/W*/
2668 dev_err(&adapter->pdev->dev,
2669 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2670 rx_local->local_psr_full & 0xFFF, len, buff_index);
2671 return NULL;
2672 }
2673
2674 /* Get and fill the RFD. */
2675 spin_lock_irqsave(&adapter->rcv_lock, flags);
2676
2677 element = rx_local->recv_list.next;
2678 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2679
2680 if (!rfd) {
2681 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2682 return NULL;
2683 }
2684
2685 list_del(&rfd->list_node);
2686 rx_local->num_ready_recv--;
2687
2688 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2689
2690 rfd->bufferindex = buff_index;
2691 rfd->ringindex = ring_index;
2692
2693 /* In V1 silicon, there is a bug which screws up filtering of runt
2694 * packets. Therefore runt packet filtering is disabled in the MAC and
2695 * the packets are dropped here. They are also counted here.
2696 */
2697 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2698 adapter->stats.rx_other_errs++;
2699 len = 0;
2700 }
2701
2702 if (len == 0) {
2703 rfd->len = 0;
2704 goto out;
2705 }
2706
2707 /* Determine if this is a multicast packet coming in */
2708 if ((word0 & ALCATEL_MULTICAST_PKT) &&
2709 !(word0 & ALCATEL_BROADCAST_PKT)) {
2710 /* Promiscuous mode and Multicast mode are not mutually
2711 * exclusive as was first thought. I guess Promiscuous is just
2712 * considered a super-set of the other filters. Generally filter
2713 * is 0x2b when in promiscuous mode.
2714 */
2715 if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST)
2716 && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
2717 && !(adapter->packet_filter &
2718 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
2719 buf = rx_local->fbr[ring_index]->virt[buff_index];
2720
2721 /* Loop through our list to see if the destination
2722 * address of this packet matches one in our list.
2723 */
2724 for (i = 0; i < adapter->multicast_addr_count; i++) {
2725 if (buf[0] == adapter->multicast_list[i][0]
2726 && buf[1] == adapter->multicast_list[i][1]
2727 && buf[2] == adapter->multicast_list[i][2]
2728 && buf[3] == adapter->multicast_list[i][3]
2729 && buf[4] == adapter->multicast_list[i][4]
2730 && buf[5] == adapter->multicast_list[i][5]) {
2731 break;
2732 }
2733 }
2734
2735 /* If our index is equal to the number of Multicast
2736 * address we have, then this means we did not find this
2737 * packet's matching address in our list. Set the len to
2738 * zero, so we free our RFD when we return from this
2739 * function.
2740 */
2741 if (i == adapter->multicast_addr_count)
2742 len = 0;
2743 }
2744
2745 if (len > 0)
2746 adapter->stats.multicast_pkts_rcvd++;
2747 } else if (word0 & ALCATEL_BROADCAST_PKT) {
2748 adapter->stats.broadcast_pkts_rcvd++;
2749 } else {
2750 /* Not sure what this counter measures in promiscuous mode.
2751 * Perhaps we should check the MAC address to see if it is
2752 * directed to us in promiscuous mode.
2753 */
2754 adapter->stats.unicast_pkts_rcvd++;
2755 }
2756
2757 if (len == 0) {
2758 rfd->len = 0;
2759 goto out;
2760 }
2761
2762 rfd->len = len;
2763
2764 skb = dev_alloc_skb(rfd->len + 2);
2765 if (!skb) {
2766 dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
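/* NOTE: the RFD removed from recv_list above is not put back on any
 * list on this error path, so it is effectively lost.
 */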
2767 return NULL;
2768 }
2769
2770 adapter->net_stats.rx_bytes += rfd->len;
2771
2772 memcpy(skb_put(skb, rfd->len),
2773 rx_local->fbr[ring_index]->virt[buff_index],
2774 rfd->len);
2775
2776 skb->dev = adapter->netdev;
2777 skb->protocol = eth_type_trans(skb, adapter->netdev);
2778 skb->ip_summed = CHECKSUM_NONE;
2779 netif_rx_ni(skb);
2780
2781 out:
2782 nic_return_rfd(adapter, rfd);
2783 return rfd;
2784 }
2785
2786 /**
2787 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
2788 * @adapter: pointer to our adapter
2789 *
2790 * Assumption - Rcv spinlock has been acquired.
2791 */
2792 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
2793 {
2794 struct rfd *rfd = NULL;
2795 u32 count = 0;
2796 bool done = true;
2797
2798 /* Process up to available RFD's */
2799 while (count < NUM_PACKETS_HANDLED) {
2800 if (list_empty(&adapter->rx_ring.recv_list)) {
2801 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
2802 done = false;
2803 break;
2804 }
2805
2806 rfd = nic_rx_pkts(adapter);
2807
2808 if (rfd == NULL)
2809 break;
2810
2811 /* Do not receive any packets until a filter has been set.
2812 * Do not receive any packets until we have link.
2813 * If length is zero, return the RFD in order to advance the
2814 * Free buffer ring.
2815 */
2816 if (!adapter->packet_filter ||
2817 !netif_carrier_ok(adapter->netdev) ||
2818 rfd->len == 0)
2819 continue;
2820
2821 /* Increment the number of packets we received */
2822 adapter->net_stats.rx_packets++;
2823
2824 /* Set the status on the packet, either resources or success */
2825 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
2826 dev_warn(&adapter->pdev->dev,
2827 "RFD's are running out\n");
2828 }
2829 count++;
2830 }
2831
2832 if (count == NUM_PACKETS_HANDLED || !done) {
2833 adapter->rx_ring.unfinished_receives = true;
2834 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2835 &adapter->regs->global.watchdog_timer);
2836 } else
2837 /* Watchdog timer will disable itself if appropriate. */
2838 adapter->rx_ring.unfinished_receives = false;
2839 }
2840
2841 /**
2842 * et131x_tx_dma_memory_alloc - allocate Tx DMA memory
2843 * @adapter: pointer to our private adapter structure
2844 *
2845 * Returns 0 on success and errno on failure (as defined in errno.h).
2846 *
2847 * Allocates memory that will be visible both to the device and to the CPU.
2848 * The OS will pass us packets, pointers to which we will insert in the Tx
2849 * Descriptor queue. The device will read this queue to find the packets in
2850 * memory. The device will update the "status" in memory each time it xmits a
2851 * packet.
2852 */
2853 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2854 {
2855 int desc_size = 0;
2856 struct tx_ring *tx_ring = &adapter->tx_ring;
2857
2858 /* Allocate memory for the TCB's (Transmit Control Block) */
2859 adapter->tx_ring.tcb_ring =
2860 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
2861 if (!adapter->tx_ring.tcb_ring) {
2862 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
2863 return -ENOMEM;
2864 }
2865
2866 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2867 tx_ring->tx_desc_ring =
2868 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
2869 desc_size,
2870 &tx_ring->tx_desc_ring_pa,
2871 GFP_KERNEL);
2872 if (!adapter->tx_ring.tx_desc_ring) {
2873 dev_err(&adapter->pdev->dev,
2874 "Cannot alloc memory for Tx Ring\n");
2875 return -ENOMEM;
2876 }
2877
2878 /* Save physical address
2879 *
2880 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2881 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2882 * are ever returned, make sure the high part is retrieved here before
2883 * storing the adjusted address.
2884 */
2885 /* Allocate memory for the Tx status block */
2886 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2887 sizeof(u32),
2888 &tx_ring->tx_status_pa,
2889 GFP_KERNEL);
2890 if (!adapter->tx_ring.tx_status) { /* check the vaddr, not the DMA handle */
2891 dev_err(&adapter->pdev->dev,
2892 "Cannot alloc memory for Tx status block\n");
2893 return -ENOMEM;
2894 }
2895 return 0;
2896 }
2897
2898 /**
2899 * et131x_tx_dma_memory_free - Free all memory allocated within this module
2900 * @adapter: pointer to our private adapter structure
2903 */
2904 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2905 {
2906 int desc_size = 0;
2907
2908 if (adapter->tx_ring.tx_desc_ring) {
2909 /* Free memory relating to Tx rings here */
2910 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2911 dma_free_coherent(&adapter->pdev->dev,
2912 desc_size,
2913 adapter->tx_ring.tx_desc_ring,
2914 adapter->tx_ring.tx_desc_ring_pa);
2915 adapter->tx_ring.tx_desc_ring = NULL;
2916 }
2917
2918 /* Free memory for the Tx status block */
2919 if (adapter->tx_ring.tx_status) {
2920 dma_free_coherent(&adapter->pdev->dev,
2921 sizeof(u32),
2922 adapter->tx_ring.tx_status,
2923 adapter->tx_ring.tx_status_pa);
2924
2925 adapter->tx_ring.tx_status = NULL;
2926 }
2927 /* Free the memory for the tcb structures */
2928 kfree(adapter->tx_ring.tcb_ring);
2929 }
2930
2931 /**
2932 * nic_send_packet - NIC specific send handler for version B silicon.
2933 * @adapter: pointer to our adapter
2934 * @tcb: pointer to struct tcb
2935 *
2936 * Returns 0 or errno.
2937 */
2938 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2939 {
2940 u32 i;
2941 struct tx_desc desc[24]; /* 24 x 16 byte */
2942 u32 frag = 0;
2943 u32 thiscopy, remainder;
2944 struct sk_buff *skb = tcb->skb;
2945 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2946 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
2947 unsigned long flags;
2948 struct phy_device *phydev = adapter->phydev;
2949 dma_addr_t dma_addr;
2950
2951 /* Part of the optimizations of this send routine restrict us to
2952 * sending 24 fragments at a pass. In practice we should never see
2953 * more than 5 fragments.
2954 *
2955 * NOTE: The older version of this function (below) can handle any
2956 * number of fragments. If needed, we can call this function,
2957 * although it is less efficient.
2958 */
2959 if (nr_frags > 23)
2960 return -EIO;
2961
2962 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2963
2964 for (i = 0; i < nr_frags; i++) {
2965 /* If there is something in this element, lets get a
2966 * descriptor from the ring and get the necessary data
2967 */
2968 if (i == 0) {
2969 /* If the fragments are smaller than a standard MTU,
2970 * then map them to a single descriptor in the Tx
2971 * Desc ring. However, if they're larger, as is
2972 * possible with support for jumbo packets, then
2973 * split them each across 2 descriptors.
2974 *
2975 * This will work until we determine why the hardware
2976 * doesn't seem to like large fragments.
2977 */
2978 if (skb_headlen(skb) <= 1514) {
2979 /* Low 16bits are length, high is vlan and
2980 unused currently so zero */
2981 desc[frag].len_vlan = skb_headlen(skb);
2982 dma_addr = dma_map_single(&adapter->pdev->dev,
2983 skb->data,
2984 skb_headlen(skb),
2985 DMA_TO_DEVICE);
2986 desc[frag].addr_lo = lower_32_bits(dma_addr);
2987 desc[frag].addr_hi = upper_32_bits(dma_addr);
2988 frag++;
2989 } else {
2990 desc[frag].len_vlan = skb_headlen(skb) / 2;
2991 dma_addr = dma_map_single(&adapter->pdev->dev,
2992 skb->data,
2993 (skb_headlen(skb) / 2),
2994 DMA_TO_DEVICE);
2995 desc[frag].addr_lo = lower_32_bits(dma_addr);
2996 desc[frag].addr_hi = upper_32_bits(dma_addr);
2997 frag++;
2998
2999 desc[frag].len_vlan = skb_headlen(skb) / 2;
3000 dma_addr = dma_map_single(&adapter->pdev->dev,
3001 skb->data +
3002 (skb_headlen(skb) / 2),
3003 (skb_headlen(skb) / 2),
3004 DMA_TO_DEVICE);
3005 desc[frag].addr_lo = lower_32_bits(dma_addr);
3006 desc[frag].addr_hi = upper_32_bits(dma_addr);
3007 frag++;
3008 }
3009 } else {
3010 desc[frag].len_vlan = frags[i - 1].size;
3011 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
3012 &frags[i - 1],
3013 0,
3014 frags[i - 1].size,
3015 DMA_TO_DEVICE);
3016 desc[frag].addr_lo = lower_32_bits(dma_addr);
3017 desc[frag].addr_hi = upper_32_bits(dma_addr);
3018 frag++;
3019 }
3020 }
3021
3022 if (phydev && phydev->speed == SPEED_1000) {
3023 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3024 /* Last element & Interrupt flag */
3025 desc[frag - 1].flags = 0x5;
3026 adapter->tx_ring.since_irq = 0;
3027 } else { /* Last element */
3028 desc[frag - 1].flags = 0x1;
3029 }
3030 } else
3031 desc[frag - 1].flags = 0x5;
3032
3033 desc[0].flags |= 2; /* First element flag */
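/* As used here, descriptor flag bit 0 marks the last fragment, bit 1
 * the first, and bit 2 requests an interrupt (so 0x5 == last fragment
 * plus interrupt).
 */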
3034
3035 tcb->index_start = adapter->tx_ring.send_idx;
3036 tcb->stale = 0;
3037
3038 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3039
3040 thiscopy = NUM_DESC_PER_RING_TX -
3041 INDEX10(adapter->tx_ring.send_idx);
3042
3043 if (thiscopy >= frag) {
3044 remainder = 0;
3045 thiscopy = frag;
3046 } else {
3047 remainder = frag - thiscopy;
3048 }
3049
3050 memcpy(adapter->tx_ring.tx_desc_ring +
3051 INDEX10(adapter->tx_ring.send_idx), desc,
3052 sizeof(struct tx_desc) * thiscopy);
3053
3054 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3055
3056 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3057 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3058 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3059 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3060 }
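/* send_idx uses the same 10-bit index plus wrap-bit encoding as the
 * free buffer rings; INDEX10() extracts the low 10 bits.
 */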
3061
3062 if (remainder) {
3063 memcpy(adapter->tx_ring.tx_desc_ring,
3064 desc + thiscopy,
3065 sizeof(struct tx_desc) * remainder);
3066
3067 add_10bit(&adapter->tx_ring.send_idx, remainder);
3068 }
3069
3070 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3071 if (adapter->tx_ring.send_idx)
3072 tcb->index = NUM_DESC_PER_RING_TX - 1;
3073 else
3074 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3075 } else
3076 tcb->index = adapter->tx_ring.send_idx - 1;
3077
3078 spin_lock(&adapter->tcb_send_qlock);
3079
3080 if (adapter->tx_ring.send_tail)
3081 adapter->tx_ring.send_tail->next = tcb;
3082 else
3083 adapter->tx_ring.send_head = tcb;
3084
3085 adapter->tx_ring.send_tail = tcb;
3086
3087 WARN_ON(tcb->next != NULL);
3088
3089 adapter->tx_ring.used++;
3090
3091 spin_unlock(&adapter->tcb_send_qlock);
3092
3093 /* Write the new write pointer back to the device. */
3094 writel(adapter->tx_ring.send_idx,
3095 &adapter->regs->txdma.service_request);
3096
3097 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3098 * timer to wake us up if this packet isn't followed by N more.
3099 */
3100 if (phydev && phydev->speed == SPEED_1000) {
3101 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3102 &adapter->regs->global.watchdog_timer);
3103 }
3104 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3105
3106 return 0;
3107 }
3108
3109 /**
3110 * send_packet - Do the work to send a packet
3111 * @skb: the packet(s) to send
3112 * @adapter: a pointer to the device's private adapter structure
3113 *
3114 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3115 *
3116 * Assumption: Send spinlock has been acquired
3117 */
3118 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3119 {
3120 int status;
3121 struct tcb *tcb = NULL;
3122 u16 *shbufva;
3123 unsigned long flags;
3124
3125 /* All packets must have at least a MAC address and a protocol type */
3126 if (skb->len < ETH_HLEN)
3127 return -EIO;
3128
3129 /* Get a TCB for this packet */
3130 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3131
3132 tcb = adapter->tx_ring.tcb_qhead;
3133
3134 if (tcb == NULL) {
3135 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3136 return -ENOMEM;
3137 }
3138
3139 adapter->tx_ring.tcb_qhead = tcb->next;
3140
3141 if (adapter->tx_ring.tcb_qhead == NULL)
3142 adapter->tx_ring.tcb_qtail = NULL;
3143
3144 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3145
3146 tcb->skb = skb;
3147
3148 if (skb->data != NULL && skb_headlen(skb) >= 6) {
3149 shbufva = (u16 *) skb->data;
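/* The destination MAC is read as three 16-bit words (a little-endian
 * load is assumed): all-ones means broadcast, while the low bits of the
 * first octet flag a multicast address.
 */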
3150
3151 if ((shbufva[0] == 0xffff) &&
3152 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3153 tcb->flags |= fMP_DEST_BROAD;
3154 } else if ((shbufva[0] & 0x3) == 0x0001) {
3155 tcb->flags |= fMP_DEST_MULTI;
3156 }
3157 }
3158
3159 tcb->next = NULL;
3160
3161 /* Call the NIC specific send handler. */
3162 status = nic_send_packet(adapter, tcb);
3163
3164 if (status != 0) {
3165 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3166
3167 if (adapter->tx_ring.tcb_qtail)
3168 adapter->tx_ring.tcb_qtail->next = tcb;
3169 else
3170 /* Apparently ready Q is empty. */
3171 adapter->tx_ring.tcb_qhead = tcb;
3172
3173 adapter->tx_ring.tcb_qtail = tcb;
3174 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3175 return status;
3176 }
3177 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3178 return 0;
3179 }
3180
3181 /**
3182 * et131x_send_packets - This function is called by the OS to send packets
3183 * @skb: the packet(s) to send
3184 * @netdev: device on which to TX the above packet(s)
3185 *
3186 * Return 0 in almost all cases; non-zero value in extreme hard failure only
3187 */
3188 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3189 {
3190 int status = 0;
3191 struct et131x_adapter *adapter = netdev_priv(netdev);
3192
3193 /* Send these packets
3194 *
3195 * NOTE: The Linux Tx entry point is only given one packet at a time
3196 * to Tx, so the PacketCount and its associated array make no sense here
3197 */
3198
3199 /* TCB is not available */
3200 if (adapter->tx_ring.used >= NUM_TCB) {
3201 /* NOTE: If there's an error on send, no need to queue the
3202 * packet under Linux; if we just send an error up to the
3203 * netif layer, it will resend the skb to us.
3204 */
3205 status = -ENOMEM;
3206 } else {
3207 /* We need to see if the link is up; if it's not, make the
3208 * netif layer think we're good and drop the packet
3209 */
3210 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3211 !netif_carrier_ok(netdev)) {
3212 dev_kfree_skb_any(skb);
3213 skb = NULL;
3214
3215 adapter->net_stats.tx_dropped++;
3216 } else {
3217 status = send_packet(skb, adapter);
3218 if (status != 0 && status != -ENOMEM) {
3219 /* On any other error, make netif think we're
3220 * OK and drop the packet
3221 */
3222 dev_kfree_skb_any(skb);
3223 skb = NULL;
3224 adapter->net_stats.tx_dropped++;
3225 }
3226 }
3227 }
3228 return status;
3229 }
3230
3231 /**
3232 * free_send_packet - Recycle a struct tcb
3233 * @adapter: pointer to our adapter
3234 * @tcb: pointer to struct tcb
3235 *
3236 * Complete the packet if necessary
3237 * Assumption - Send spinlock has been acquired
3238 */
3239 static inline void free_send_packet(struct et131x_adapter *adapter,
3240 struct tcb *tcb)
3241 {
3242 unsigned long flags;
3243 struct tx_desc *desc = NULL;
3244 struct net_device_stats *stats = &adapter->net_stats;
3245 u64 dma_addr;
3246
3247 if (tcb->flags & fMP_DEST_BROAD)
3248 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3249 else if (tcb->flags & fMP_DEST_MULTI)
3250 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3251 else
3252 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3253
3254 if (tcb->skb) {
3255 stats->tx_bytes += tcb->skb->len;
3256
3257 /* Iterate through the TX descriptors on the ring
3258 * corresponding to this packet and unmap the fragments
3259 * they point to
3260 */
3261 do {
3262 desc = (struct tx_desc *)
3263 (adapter->tx_ring.tx_desc_ring +
3264 INDEX10(tcb->index_start));
3265
3266 dma_addr = desc->addr_lo;
3267 dma_addr |= (u64)desc->addr_hi << 32;
3268
3269 dma_unmap_single(&adapter->pdev->dev,
3270 dma_addr,
3271 desc->len_vlan, DMA_TO_DEVICE);
3272
3273 add_10bit(&tcb->index_start, 1);
3274 if (INDEX10(tcb->index_start) >=
3275 NUM_DESC_PER_RING_TX) {
3276 tcb->index_start &= ~ET_DMA10_MASK;
3277 tcb->index_start ^= ET_DMA10_WRAP;
3278 }
3279 } while (desc != (adapter->tx_ring.tx_desc_ring +
3280 INDEX10(tcb->index)));
3281
3282 dev_kfree_skb_any(tcb->skb);
3283 }
3284
3285 memset(tcb, 0, sizeof(struct tcb));
3286
3287 /* Add the TCB to the Ready Q */
3288 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3289
3290 adapter->net_stats.tx_packets++;
3291
3292 if (adapter->tx_ring.tcb_qtail)
3293 adapter->tx_ring.tcb_qtail->next = tcb;
3294 else
3295 /* Apparently ready Q is empty. */
3296 adapter->tx_ring.tcb_qhead = tcb;
3297
3298 adapter->tx_ring.tcb_qtail = tcb;
3299
3300 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3301 WARN_ON(adapter->tx_ring.used < 0);
3302 }
3303
3304 /**
3305 * et131x_free_busy_send_packets - Free and complete the stopped active sends
3306 * @adapter: pointer to our adapter
3307 *
3308 * Assumption - Send spinlock has been acquired
3309 */
3310 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3311 {
3312 struct tcb *tcb;
3313 unsigned long flags;
3314 u32 freed = 0;
3315
3316 /* Any packets being sent? Check the first TCB on the send list */
3317 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3318
3319 tcb = adapter->tx_ring.send_head;
3320
3321 while (tcb != NULL && freed < NUM_TCB) {
3322 struct tcb *next = tcb->next;
3323
3324 adapter->tx_ring.send_head = next;
3325
3326 if (next == NULL)
3327 adapter->tx_ring.send_tail = NULL;
3328
3329 adapter->tx_ring.used--;
3330
3331 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3332
3333 freed++;
3334 free_send_packet(adapter, tcb);
3335
3336 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3337
3338 tcb = adapter->tx_ring.send_head;
3339 }
3340
3341 WARN_ON(freed == NUM_TCB);
3342
3343 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3344
3345 adapter->tx_ring.used = 0;
3346 }
3347
3348 /**
3349 * et131x_handle_send_interrupt - Interrupt handler for sending processing
3350 * @adapter: pointer to our adapter
3351 *
3352 * Re-claim the send resources, complete sends and get more to send from
3353 * the send wait queue.
3354 *
3355 * Assumption - Send spinlock has been acquired
3356 */
3357 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3358 {
3359 unsigned long flags;
3360 u32 serviced;
3361 struct tcb *tcb;
3362 u32 index;
3363
3364 serviced = readl(&adapter->regs->txdma.new_service_complete);
3365 index = INDEX10(serviced);
3366
3367 /* Has the ring wrapped? Process any descriptors that do not have
3368 * the same "wrap" indicator as the current completion indicator
3369 */
3370 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3371
3372 tcb = adapter->tx_ring.send_head;
3373
3374 while (tcb &&
3375 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3376 index < INDEX10(tcb->index)) {
3377 adapter->tx_ring.used--;
3378 adapter->tx_ring.send_head = tcb->next;
3379 if (tcb->next == NULL)
3380 adapter->tx_ring.send_tail = NULL;
3381
3382 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3383 free_send_packet(adapter, tcb);
3384 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3385
3386 /* Goto the next packet */
3387 tcb = adapter->tx_ring.send_head;
3388 }
3389 while (tcb &&
3390 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3391 && index > (tcb->index & ET_DMA10_MASK)) {
3392 adapter->tx_ring.used--;
3393 adapter->tx_ring.send_head = tcb->next;
3394 if (tcb->next == NULL)
3395 adapter->tx_ring.send_tail = NULL;
3396
3397 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3398 free_send_packet(adapter, tcb);
3399 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3400
3401 /* Goto the next packet */
3402 tcb = adapter->tx_ring.send_head;
3403 }
3404
3405 /* Wake up the queue when we hit a low-water mark */
3406 if (adapter->tx_ring.used <= NUM_TCB / 3)
3407 netif_wake_queue(adapter->netdev);
3408
3409 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3410 }
3411
3412 static int et131x_get_settings(struct net_device *netdev,
3413 struct ethtool_cmd *cmd)
3414 {
3415 struct et131x_adapter *adapter = netdev_priv(netdev);
3416
3417 return phy_ethtool_gset(adapter->phydev, cmd);
3418 }
3419
3420 static int et131x_set_settings(struct net_device *netdev,
3421 struct ethtool_cmd *cmd)
3422 {
3423 struct et131x_adapter *adapter = netdev_priv(netdev);
3424
3425 return phy_ethtool_sset(adapter->phydev, cmd);
3426 }
3427
3428 static int et131x_get_regs_len(struct net_device *netdev)
3429 {
3430 #define ET131X_REGS_LEN 256
3431 return ET131X_REGS_LEN * sizeof(u32);
3432 }
3433
3434 static void et131x_get_regs(struct net_device *netdev,
3435 struct ethtool_regs *regs, void *regs_data)
3436 {
3437 struct et131x_adapter *adapter = netdev_priv(netdev);
3438 struct address_map __iomem *aregs = adapter->regs;
3439 u32 *regs_buff = regs_data;
3440 u32 num = 0;
3441
3442 memset(regs_data, 0, et131x_get_regs_len(netdev));
3443
3444 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3445 adapter->pdev->device;
3446
3447 /* PHY regs */
3448 et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3449 et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3450 et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3451 et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3452 et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3453 et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3454 et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3455 /* Autoneg next page transmit reg */
3456 et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3457 /* Link partner next page reg */
3458 et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3459 et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3460 et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3461 et131x_mii_read(adapter, 0x0b, (u16 *)&regs_buff[num++]);
3462 et131x_mii_read(adapter, 0x0c, (u16 *)&regs_buff[num++]);
3463 et131x_mii_read(adapter, MII_MMD_CTRL, (u16 *)&regs_buff[num++]);
3464 et131x_mii_read(adapter, MII_MMD_DATA, (u16 *)&regs_buff[num++]);
3465 et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3466
3467 et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3468 et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3469 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3470 (u16 *)&regs_buff[num++]);
3471 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3472 (u16 *)&regs_buff[num++]);
3473 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3474 (u16 *)&regs_buff[num++]);
3475
3476 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3477 (u16 *)&regs_buff[num++]);
3478 et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3479 et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3480 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3481 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3482 (u16 *)&regs_buff[num++]);
3483 et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3484 et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3485 et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3486
3487 /* Global regs */
3488 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3489 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3490 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3491 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3492 regs_buff[num++] = readl(&aregs->global.pm_csr);
3493 regs_buff[num++] = adapter->stats.interrupt_status;
3494 regs_buff[num++] = readl(&aregs->global.int_mask);
3495 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3496 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3497 regs_buff[num++] = readl(&aregs->global.sw_reset);
3498 regs_buff[num++] = readl(&aregs->global.slv_timer);
3499 regs_buff[num++] = readl(&aregs->global.msi_config);
3500 regs_buff[num++] = readl(&aregs->global.loopback);
3501 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3502
3503 /* TXDMA regs */
3504 regs_buff[num++] = readl(&aregs->txdma.csr);
3505 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3506 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3507 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3508 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3509 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3510 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3511 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3512 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3513 regs_buff[num++] = readl(&aregs->txdma.service_request);
3514 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3515 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3516 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3517 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3518 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3519 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3520 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3521 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3522 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3523 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3524 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3525 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3526 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3527 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3528 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3529 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3530
3531 /* RXDMA regs */
3532 regs_buff[num++] = readl(&aregs->rxdma.csr);
3533 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3534 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3535 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3536 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3537 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3538 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3539 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3540 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3541 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3542 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3543 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3544 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3545 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3546 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3547 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3548 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3549 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3550 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3551 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3552 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3553 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3554 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3555 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3556 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3557 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3558 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3559 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3560 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3561 }
3562
3563 static void et131x_get_drvinfo(struct net_device *netdev,
3564 struct ethtool_drvinfo *info)
3565 {
3566 struct et131x_adapter *adapter = netdev_priv(netdev);
3567
3568 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3569 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3570 strlcpy(info->bus_info, pci_name(adapter->pdev),
3571 sizeof(info->bus_info));
3572 }
3573
3574 static const struct ethtool_ops et131x_ethtool_ops = {
3575 .get_settings = et131x_get_settings,
3576 .set_settings = et131x_set_settings,
3577 .get_drvinfo = et131x_get_drvinfo,
3578 .get_regs_len = et131x_get_regs_len,
3579 .get_regs = et131x_get_regs,
3580 .get_link = ethtool_op_get_link,
3581 };
3582 /**
3583 * et131x_hwaddr_init - set up the MAC Address on the ET1310
3584 * @adapter: pointer to our private adapter structure
3585 */
3586 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3587 {
3588 	/* If we have our default MAC from init and no MAC address from
3589 	 * EEPROM, then we need to generate the last octet and set it on
3590 	 * the device
3591 	 */
3592 if (is_zero_ether_addr(adapter->rom_addr)) {
3593 /*
3594 * We need to randomly generate the last octet so we
3595 		 * decrease our chances of setting the MAC address to the
3596 		 * same as another one of our cards in the system
3597 */
3598 get_random_bytes(&adapter->addr[5], 1);
3599 /*
3600 * We have the default value in the register we are
3601 * working with so we need to copy the current
3602 * address into the permanent address
3603 */
3604 memcpy(adapter->rom_addr,
3605 adapter->addr, ETH_ALEN);
3606 } else {
3607 /* We do not have an override address, so set the
3608 * current address to the permanent address and add
3609 * it to the device
3610 */
3611 memcpy(adapter->addr,
3612 adapter->rom_addr, ETH_ALEN);
3613 }
3614 }
3615
3616 /**
3617 * et131x_pci_init - initial PCI setup
3618 * @adapter: pointer to our private adapter structure
3619 * @pdev: our PCI device
3620 *
3621 * Perform the initial setup of PCI registers and if possible initialise
3622 * the MAC address. At this point the I/O registers have yet to be mapped
3623 */
3624 static int et131x_pci_init(struct et131x_adapter *adapter,
3625 struct pci_dev *pdev)
3626 {
3627 u16 max_payload;
3628 int i, rc;
3629
3630 rc = et131x_init_eeprom(adapter);
3631 if (rc < 0)
3632 goto out;
3633
3634 if (!pci_is_pcie(pdev)) {
3635 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3636 goto err_out;
3637 }
3638
3639 /* Let's set up the PORT LOGIC Register. First we need to know what
3640 * the max_payload_size is
3641 */
3642 if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
3643 dev_err(&pdev->dev,
3644 "Could not read PCI config space for Max Payload Size\n");
3645 goto err_out;
3646 }
3647
3648 /* Program the Ack/Nak latency and replay timers */
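	/* Bits [2:0] of the Device Capabilities register hold the
	 * supported Max_Payload_Size: 0 = 128 bytes, 1 = 256 bytes,
	 * 2 = 512 bytes and so on, doubling with each step
	 */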
3649 max_payload &= 0x07;
3650
3651 if (max_payload < 2) {
3652 static const u16 acknak[2] = { 0x76, 0xD0 };
3653 static const u16 replay[2] = { 0x1E0, 0x2ED };
3654
3655 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3656 acknak[max_payload])) {
3657 dev_err(&pdev->dev,
3658 "Could not write PCI config space for ACK/NAK\n");
3659 goto err_out;
3660 }
3661 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3662 replay[max_payload])) {
3663 dev_err(&pdev->dev,
3664 "Could not write PCI config space for Replay Timer\n");
3665 goto err_out;
3666 }
3667 }
3668
3669 	/* L0s and L1 latency timers. We are using the default values:
3670 	 * 001 for L0s and 010 for L1
3671 	 */
3672 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3673 dev_err(&pdev->dev,
3674 "Could not write PCI config space for Latency Timers\n");
3675 goto err_out;
3676 }
3677
3678 /* Change the max read size to 2k */
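	/* The value 0x4 written to the READRQ field (bits [14:12] of the
	 * Device Control register) selects a 2048 byte maximum read request
	 */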
3679 if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
3680 PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
3681 dev_err(&pdev->dev,
3682 "Couldn't change PCI config space for Max read size\n");
3683 goto err_out;
3684 }
3685
3686 /* Get MAC address from config space if an eeprom exists, otherwise
3687 * the MAC address there will not be valid
3688 */
3689 if (!adapter->has_eeprom) {
3690 et131x_hwaddr_init(adapter);
3691 return 0;
3692 }
3693
3694 for (i = 0; i < ETH_ALEN; i++) {
3695 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3696 adapter->rom_addr + i)) {
3697 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3698 goto err_out;
3699 }
3700 }
3701 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
3702 out:
3703 return rc;
3704 err_out:
3705 rc = -EIO;
3706 goto out;
3707 }
3708
3709 /**
3710 * et131x_error_timer_handler - timer callback used to track recurring errors
3711 * @data: timer-specific variable; here a pointer to our adapter structure
3712 *
3713 * The routine called when the error timer expires, to track the number of
3714 * recurring errors.
3715 */
3716 static void et131x_error_timer_handler(unsigned long data)
3717 {
3718 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3719 struct phy_device *phydev = adapter->phydev;
3720
3721 if (et1310_in_phy_coma(adapter)) {
3722 		/* Bring the device immediately out of coma to
3723 		 * prevent it from sleeping indefinitely; this
3724 		 * mechanism could be improved! */
3725 et1310_disable_phy_coma(adapter);
3726 adapter->boot_coma = 20;
3727 } else {
3728 et1310_update_macstat_host_counters(adapter);
3729 }
3730
3731 if (!phydev->link && adapter->boot_coma < 11)
3732 adapter->boot_coma++;
3733
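	/* boot_coma counts timer ticks spent without link; after ten
	 * ticks the PHY is put into coma mode to save power until the
	 * link comes back
	 */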
3734 if (adapter->boot_coma == 10) {
3735 if (!phydev->link) {
3736 if (!et1310_in_phy_coma(adapter)) {
3737 /* NOTE - This was originally a 'sync with
3738 * interrupt'. How to do that under Linux?
3739 */
3740 et131x_enable_interrupts(adapter);
3741 et1310_enable_phy_coma(adapter);
3742 }
3743 }
3744 }
3745
3746 /* This is a periodic timer, so reschedule */
3747 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3748 }
3749
3750 /**
3751 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
3752 * @adapter: pointer to our private adapter structure
3753 */
3754 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3755 {
3756 /* Free DMA memory */
3757 et131x_tx_dma_memory_free(adapter);
3758 et131x_rx_dma_memory_free(adapter);
3759 }
3760
3761 /**
3762 * et131x_adapter_memory_alloc - Allocate all Tx & Rx memory
3763 * @adapter: pointer to our private adapter structure
3764 *
3765 * Returns 0 on success, errno on failure (as defined in errno.h).
3766 *
3767 * Allocate all the memory blocks for send, receive and others.
3768 */
3769 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3770 {
3771 int status;
3772
3773 /* Allocate memory for the Tx Ring */
3774 status = et131x_tx_dma_memory_alloc(adapter);
3775 if (status != 0) {
3776 dev_err(&adapter->pdev->dev,
3777 "et131x_tx_dma_memory_alloc FAILED\n");
3778 return status;
3779 }
3780 /* Receive buffer memory allocation */
3781 status = et131x_rx_dma_memory_alloc(adapter);
3782 if (status != 0) {
3783 dev_err(&adapter->pdev->dev,
3784 "et131x_rx_dma_memory_alloc FAILED\n");
3785 et131x_tx_dma_memory_free(adapter);
3786 return status;
3787 }
3788
3789 /* Init receive data structures */
3790 status = et131x_init_recv(adapter);
3791 if (status) {
3792 dev_err(&adapter->pdev->dev,
3793 "et131x_init_recv FAILED\n");
3794 et131x_adapter_memory_free(adapter);
3795 }
3796 return status;
3797 }
3798
3799 static void et131x_adjust_link(struct net_device *netdev)
3800 {
3801 struct et131x_adapter *adapter = netdev_priv(netdev);
3802 struct phy_device *phydev = adapter->phydev;
3803
3804 if (netif_carrier_ok(netdev)) {
3805 adapter->boot_coma = 20;
3806
3807 if (phydev && phydev->speed == SPEED_10) {
3808 /*
3809 * NOTE - Is there a way to query this without
3810 * TruePHY?
3811 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
3812 * EMI_TRUEPHY_A13O) {
3813 */
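			/* NOTE - The magic values in this register sequence
			 * are carried over from the reference driver; their
			 * exact effect at 10 Mbit is not documented here
			 */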
3814 u16 register18;
3815
3816 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3817 &register18);
3818 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3819 register18 | 0x4);
3820 et131x_mii_write(adapter, PHY_INDEX_REG,
3821 register18 | 0x8402);
3822 et131x_mii_write(adapter, PHY_DATA_REG,
3823 register18 | 511);
3824 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3825 register18);
3826 }
3827
3828 et1310_config_flow_control(adapter);
3829
3830 if (phydev && phydev->speed == SPEED_1000 &&
3831 adapter->registry_jumbo_packet > 2048) {
3832 u16 reg;
3833
3834 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3835 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3836 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3837 et131x_mii_write(adapter, PHY_CONFIG, reg);
3838 }
3839
3840 et131x_set_rx_dma_timer(adapter);
3841 et1310_config_mac_regs2(adapter);
3842 }
3843
3844 if (phydev && phydev->link != adapter->link) {
3845 /*
3846 * Check to see if we are in coma mode and if
3847 * so, disable it because we will not be able
3848 * to read PHY values until we are out.
3849 */
3850 if (et1310_in_phy_coma(adapter))
3851 et1310_disable_phy_coma(adapter);
3852
3853 if (phydev->link) {
3854 adapter->boot_coma = 20;
3855 } else {
3856 dev_warn(&adapter->pdev->dev,
3857 "Link down - cable problem ?\n");
3858 adapter->boot_coma = 0;
3859
3860 if (phydev->speed == SPEED_10) {
3861 /* NOTE - Is there a way to query this without
3862 * TruePHY?
3863 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
3864 * EMI_TRUEPHY_A13O)
3865 */
3866 u16 register18;
3867
3868 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3869 &register18);
3870 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3871 register18 | 0x4);
3872 et131x_mii_write(adapter, PHY_INDEX_REG,
3873 register18 | 0x8402);
3874 et131x_mii_write(adapter, PHY_DATA_REG,
3875 register18 | 511);
3876 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3877 register18);
3878 }
3879
3880 /* Free the packets being actively sent & stopped */
3881 et131x_free_busy_send_packets(adapter);
3882
3883 /* Re-initialize the send structures */
3884 et131x_init_send(adapter);
3885
3886 /*
3887 * Bring the device back to the state it was during
3888 * init prior to autonegotiation being complete. This
3889 * way, when we get the auto-neg complete interrupt,
3890 * we can complete init by calling config_mac_regs2.
3891 */
3892 et131x_soft_reset(adapter);
3893
3894 /* Setup ET1310 as per the documentation */
3895 et131x_adapter_setup(adapter);
3896
3897 /* perform reset of tx/rx */
3898 et131x_disable_txrx(netdev);
3899 et131x_enable_txrx(netdev);
3900 }
3901
3902 adapter->link = phydev->link;
3903
3904 phy_print_status(phydev);
3905 }
3906 }
3907
3908 static int et131x_mii_probe(struct net_device *netdev)
3909 {
3910 struct et131x_adapter *adapter = netdev_priv(netdev);
3911 struct phy_device *phydev = NULL;
3912
3913 phydev = phy_find_first(adapter->mii_bus);
3914 if (!phydev) {
3915 dev_err(&adapter->pdev->dev, "no PHY found\n");
3916 return -ENODEV;
3917 }
3918
3919 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3920 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
3921
3922 if (IS_ERR(phydev)) {
3923 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3924 return PTR_ERR(phydev);
3925 }
3926
3927 phydev->supported &= (SUPPORTED_10baseT_Half
3928 | SUPPORTED_10baseT_Full
3929 | SUPPORTED_100baseT_Half
3930 | SUPPORTED_100baseT_Full
3931 | SUPPORTED_Autoneg
3932 | SUPPORTED_MII
3933 | SUPPORTED_TP);
3934
3935 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3936 phydev->supported |= SUPPORTED_1000baseT_Full;
3937
3938 phydev->advertising = phydev->supported;
3939 adapter->phydev = phydev;
3940
3941 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3942 phydev->drv->name, dev_name(&phydev->dev));
3943
3944 return 0;
3945 }
3946
3947 /**
3948 * et131x_adapter_init - Initialize the private adapter structure
3949 * @adapter: pointer to the private adapter struct
3950 * @pdev: pointer to the PCI device
3951 *
3952 * Initialize the data structures for the et131x_adapter object and link
3953 * them together with the platform provided device structures.
3954 */
3955 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3956 struct pci_dev *pdev)
3957 {
3958 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3959
3960 struct et131x_adapter *adapter;
3961
3962 /* Allocate private adapter struct and copy in relevant information */
3963 adapter = netdev_priv(netdev);
3964 adapter->pdev = pci_dev_get(pdev);
3965 adapter->netdev = netdev;
3966
3967 /* Initialize spinlocks here */
3968 spin_lock_init(&adapter->lock);
3969 spin_lock_init(&adapter->tcb_send_qlock);
3970 spin_lock_init(&adapter->tcb_ready_qlock);
3971 spin_lock_init(&adapter->send_hw_lock);
3972 spin_lock_init(&adapter->rcv_lock);
3973 spin_lock_init(&adapter->rcv_pend_lock);
3974 spin_lock_init(&adapter->fbr_lock);
3975 spin_lock_init(&adapter->phy_lock);
3976
3977 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
3978
3979 /* Set the MAC address to a default */
3980 memcpy(adapter->addr, default_mac, ETH_ALEN);
3981
3982 return adapter;
3983 }
3984
3985 /**
3986 * et131x_pci_remove - Handle removal of a PCI device
3987 * @pdev: a pointer to the device's pci_dev structure
3988 *
3989 * Registered in the pci_driver structure, this function is called when the
3990 * PCI subsystem detects that a PCI device which matches the information
3991 * contained in the pci_device_id table has been removed.
3992 */
3993 static void et131x_pci_remove(struct pci_dev *pdev)
3994 {
3995 struct net_device *netdev = pci_get_drvdata(pdev);
3996 struct et131x_adapter *adapter = netdev_priv(netdev);
3997
3998 unregister_netdev(netdev);
3999 phy_disconnect(adapter->phydev);
4000 mdiobus_unregister(adapter->mii_bus);
4001 kfree(adapter->mii_bus->irq);
4002 mdiobus_free(adapter->mii_bus);
4003
4004 et131x_adapter_memory_free(adapter);
4005 iounmap(adapter->regs);
4006 pci_dev_put(pdev);
4007
4008 free_netdev(netdev);
4009 pci_release_regions(pdev);
4010 pci_disable_device(pdev);
4011 }
4012
4013 /**
4014 * et131x_up - Bring up a device for use.
4015 * @netdev: device to be opened
4016 */
4017 static void et131x_up(struct net_device *netdev)
4018 {
4019 struct et131x_adapter *adapter = netdev_priv(netdev);
4020
4021 et131x_enable_txrx(netdev);
4022 phy_start(adapter->phydev);
4023 }
4024
4025 /**
4026 * et131x_down - Bring down the device
4027 * @netdev: device to be brought down
4028 */
4029 static void et131x_down(struct net_device *netdev)
4030 {
4031 struct et131x_adapter *adapter = netdev_priv(netdev);
4032
4033 /* Save the timestamp for the TX watchdog, prevent a timeout */
4034 netdev->trans_start = jiffies;
4035
4036 phy_stop(adapter->phydev);
4037 et131x_disable_txrx(netdev);
4038 }
4039
4040 #ifdef CONFIG_PM_SLEEP
4041 static int et131x_suspend(struct device *dev)
4042 {
4043 struct pci_dev *pdev = to_pci_dev(dev);
4044 struct net_device *netdev = pci_get_drvdata(pdev);
4045
4046 if (netif_running(netdev)) {
4047 netif_device_detach(netdev);
4048 et131x_down(netdev);
4049 pci_save_state(pdev);
4050 }
4051
4052 return 0;
4053 }
4054
4055 static int et131x_resume(struct device *dev)
4056 {
4057 struct pci_dev *pdev = to_pci_dev(dev);
4058 struct net_device *netdev = pci_get_drvdata(pdev);
4059
4060 if (netif_running(netdev)) {
4061 pci_restore_state(pdev);
4062 et131x_up(netdev);
4063 netif_device_attach(netdev);
4064 }
4065
4066 return 0;
4067 }
4068
4069 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4070 #define ET131X_PM_OPS (&et131x_pm_ops)
4071 #else
4072 #define ET131X_PM_OPS NULL
4073 #endif
4074
4075 /**
4076 * et131x_isr - The Interrupt Service Routine for the driver.
4077 * @irq: the IRQ on which the interrupt was received.
4078 * @dev_id: device-specific info (here a pointer to a net_device struct)
4079 *
4080 * Returns a value indicating if the interrupt was handled.
4081 */
4082 irqreturn_t et131x_isr(int irq, void *dev_id)
4083 {
4084 bool handled = true;
4085 struct net_device *netdev = (struct net_device *)dev_id;
4086 struct et131x_adapter *adapter = NULL;
4087 u32 status;
4088
4089 if (!netif_device_present(netdev)) {
4090 handled = false;
4091 goto out;
4092 }
4093
4094 adapter = netdev_priv(netdev);
4095
4096 /* If the adapter is in low power state, then it should not
4097 * recognize any interrupt
4098 */
4099
4100 /* Disable Device Interrupts */
4101 et131x_disable_interrupts(adapter);
4102
4103 /* Get a copy of the value in the interrupt status register
4104 * so we can process the interrupting section
4105 */
4106 status = readl(&adapter->regs->global.int_status);
4107
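	/* Mask off the sources we do not service; when Tx flow control
	 * is enabled the free buffer ring low events are kept as well,
	 * since they are used to request pause frames (see the handler)
	 */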
4108 if (adapter->flowcontrol == FLOW_TXONLY ||
4109 adapter->flowcontrol == FLOW_BOTH) {
4110 status &= ~INT_MASK_ENABLE;
4111 } else {
4112 status &= ~INT_MASK_ENABLE_NO_FLOW;
4113 }
4114
4115 /* Make sure this is our interrupt */
4116 if (!status) {
4117 handled = false;
4118 et131x_enable_interrupts(adapter);
4119 goto out;
4120 }
4121
4122 /* This is our interrupt, so process accordingly */
4123
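	/* The watchdog is used to catch completions the hardware failed
	 * to signal: a head TCB seen stale twice fakes a Tx completion,
	 * and unfinished receives fake an Rx transfer done; with nothing
	 * pending the watchdog is simply switched off
	 */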
4124 if (status & ET_INTR_WATCHDOG) {
4125 struct tcb *tcb = adapter->tx_ring.send_head;
4126
4127 if (tcb)
4128 if (++tcb->stale > 1)
4129 status |= ET_INTR_TXDMA_ISR;
4130
4131 if (adapter->rx_ring.unfinished_receives)
4132 status |= ET_INTR_RXDMA_XFR_DONE;
4133 else if (tcb == NULL)
4134 writel(0, &adapter->regs->global.watchdog_timer);
4135
4136 status &= ~ET_INTR_WATCHDOG;
4137 }
4138
4139 if (status == 0) {
4140 /* This interrupt has in some way been "handled" by
4141 * the ISR. Either it was a spurious Rx interrupt, or
4142 * it was a Tx interrupt that has been filtered by
4143 * the ISR.
4144 */
4145 et131x_enable_interrupts(adapter);
4146 goto out;
4147 }
4148
4149 /* We need to save the interrupt status value for use in our
4150 * DPC. We will clear the software copy of that in that
4151 * routine.
4152 */
4153 adapter->stats.interrupt_status = status;
4154
4155 	/* Schedule the ISR's deferred handler to run as a work item
4156 	 * on the kernel's shared workqueue, where it will service the
4157 	 * interrupt sources recorded above
4158 	 */
4159 schedule_work(&adapter->task);
4160 out:
4161 return IRQ_RETVAL(handled);
4162 }
4163
4164 /**
4165 * et131x_isr_handler - The ISR handler
4166 * @work: a pointer to the work_struct embedded in our adapter structure
4167 *
4168 * Scheduled to run in a deferred context by the ISR. This is where the
4169 * ISR's work actually gets done.
4170 */
4171 static void et131x_isr_handler(struct work_struct *work)
4172 {
4173 struct et131x_adapter *adapter =
4174 container_of(work, struct et131x_adapter, task);
4175 u32 status = adapter->stats.interrupt_status;
4176 struct address_map __iomem *iomem = adapter->regs;
4177
4178 /*
4179 * These first two are by far the most common. Once handled, we clear
4180 * their two bits in the status word. If the word is now zero, we
4181 * exit.
4182 */
4183 /* Handle all the completed Transmit interrupts */
4184 if (status & ET_INTR_TXDMA_ISR)
4185 et131x_handle_send_interrupt(adapter);
4186
4187 /* Handle all the completed Receives interrupts */
4188 if (status & ET_INTR_RXDMA_XFR_DONE)
4189 et131x_handle_recv_interrupt(adapter);
4190
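	/* Clear the two bits just serviced above; 0xffffffd7 masks out
	 * bits 3 and 5 (the Tx and Rx completion interrupts)
	 */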
4191 status &= 0xffffffd7;
4192
4193 if (!status)
4194 goto out;
4195
4196 /* Handle the TXDMA Error interrupt */
4197 if (status & ET_INTR_TXDMA_ERR) {
4198 u32 txdma_err;
4199
4200 /* Following read also clears the register (COR) */
4201 txdma_err = readl(&iomem->txdma.tx_dma_error);
4202
4203 dev_warn(&adapter->pdev->dev,
4204 "TXDMA_ERR interrupt, error = %d\n",
4205 txdma_err);
4206 }
4207
4208 /* Handle Free Buffer Ring 0 and 1 Low interrupt */
4209 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4210 /*
4211 * This indicates the number of unused buffers in RXDMA free
4212 * buffer ring 0 is <= the limit you programmed. Free buffer
4213 * resources need to be returned. Free buffers are consumed as
4214 * packets are passed from the network to the host. The host
4215 * becomes aware of the packets from the contents of the packet
4216 * status ring. This ring is queried when the packet done
4217 * interrupt occurs. Packets are then passed to the OS. When
4218 * the OS is done with the packets the resources can be
4219 * returned to the ET1310 for re-use. This interrupt is one
4220 * method of returning resources.
4221 */
4222
4223 /*
4224 * If the user has flow control on, then we will
4225 * send a pause packet, otherwise just exit
4226 */
4227 if (adapter->flowcontrol == FLOW_TXONLY ||
4228 adapter->flowcontrol == FLOW_BOTH) {
4229 u32 pm_csr;
4230
4231 /*
4232 * Tell the device to send a pause packet via the back
4233 * pressure register (bp req and bp xon/xoff)
4234 */
4235 pm_csr = readl(&iomem->global.pm_csr);
4236 if (!et1310_in_phy_coma(adapter))
4237 writel(3, &iomem->txmac.bp_ctrl);
4238 }
4239 }
4240
4241 /* Handle Packet Status Ring Low Interrupt */
4242 if (status & ET_INTR_RXDMA_STAT_LOW) {
4243 /*
4244 * Same idea as with the two Free Buffer Rings. Packets going
4245 * from the network to the host each consume a free buffer
4246 		 * resource and a packet status resource. These resources are
4247 * passed to the OS. When the OS is done with the resources,
4248 * they need to be returned to the ET1310. This is one method
4249 * of returning the resources.
4250 */
4251 }
4252
4253 /* Handle RXDMA Error Interrupt */
4254 if (status & ET_INTR_RXDMA_ERR) {
4255 /*
4256 * The rxdma_error interrupt is sent when a time-out on a
4257 * request issued by the JAGCore has occurred or a completion is
4258 		 * returned with an unsuccessful status. In both cases the
4259 * request is considered complete. The JAGCore will
4260 * automatically re-try the request in question. Normally
4261 * information on events like these are sent to the host using
4262 * the "Advanced Error Reporting" capability. This interrupt is
4263 * another way of getting similar information. The only thing
4264 * required is to clear the interrupt by reading the ISR in the
4265 * global resources. The JAGCore will do a re-try on the
4266 * request. Normally you should never see this interrupt. If
4267 * you start to see this interrupt occurring frequently then
4268 * something bad has occurred. A reset might be the thing to do.
4269 */
4270 /* TRAP();*/
4271
4272 dev_warn(&adapter->pdev->dev,
4273 "RxDMA_ERR interrupt, error %x\n",
4274 readl(&iomem->txmac.tx_test));
4275 }
4276
4277 /* Handle the Wake on LAN Event */
4278 if (status & ET_INTR_WOL) {
4279 /*
4280 * This is a secondary interrupt for wake on LAN. The driver
4281 * should never see this, if it does, something serious is
4282 * wrong. We will TRAP the message when we are in DBG mode,
4283 * otherwise we will ignore it.
4284 */
4285 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4286 }
4287
4288 /* Let's move on to the TxMac */
4289 if (status & ET_INTR_TXMAC) {
4290 u32 err = readl(&iomem->txmac.err);
4291
4292 /*
4293 * When any of the errors occur and TXMAC generates an
4294 * interrupt to report these errors, it usually means that
4295 * TXMAC has detected an error in the data stream retrieved
4296 * from the on-chip Tx Q. All of these errors are catastrophic
4297 * and TXMAC won't be able to recover data when these errors
4298 * occur. In a nutshell, the whole Tx path will have to be reset
4299 * and re-configured afterwards.
4300 */
4301 dev_warn(&adapter->pdev->dev,
4302 "TXMAC interrupt, error 0x%08x\n",
4303 err);
4304
4305 /*
4306 * If we are debugging, we want to see this error, otherwise we
4307 * just want the device to be reset and continue
4308 */
4309 }
4310
4311 /* Handle RXMAC Interrupt */
4312 if (status & ET_INTR_RXMAC) {
4313 /*
4314 * These interrupts are catastrophic to the device, what we need
4315 * to do is disable the interrupts and set the flag to cause us
4316 * to reset so we can solve this issue.
4317 */
4318 /* MP_SET_FLAG( adapter, fMP_ADAPTER_HARDWARE_ERROR); */
4319
4320 dev_warn(&adapter->pdev->dev,
4321 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4322 readl(&iomem->rxmac.err_reg));
4323
4324 dev_warn(&adapter->pdev->dev,
4325 "Enable 0x%08x, Diag 0x%08x\n",
4326 readl(&iomem->rxmac.ctrl),
4327 readl(&iomem->rxmac.rxq_diag));
4328
4329 /*
4330 * If we are debugging, we want to see this error, otherwise we
4331 * just want the device to be reset and continue
4332 */
4333 }
4334
4335 /* Handle MAC_STAT Interrupt */
4336 if (status & ET_INTR_MAC_STAT) {
4337 /*
4338 * This means at least one of the un-masked counters in the
4339 * MAC_STAT block has rolled over. Use this to maintain the top,
4340 * software managed bits of the counter(s).
4341 */
4342 et1310_handle_macstat_interrupt(adapter);
4343 }
4344
4345 /* Handle SLV Timeout Interrupt */
4346 if (status & ET_INTR_SLV_TIMEOUT) {
4347 /*
4348 * This means a timeout has occurred on a read or write request
4349 * to one of the JAGCore registers. The Global Resources block
4350 * has terminated the request and on a read request, returned a
4351 * "fake" value. The most likely reasons are: Bad Address or the
4352 * addressed module is in a power-down state and can't respond.
4353 */
4354 }
4355 out:
4356 et131x_enable_interrupts(adapter);
4357 }
4358
4359 /**
4360 * et131x_stats - Return the current device statistics.
4361 * @netdev: device whose stats are being queried
4362 *
4363 * Returns a pointer to the device's net_device_stats structure
4364 */
4365 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4366 {
4367 struct et131x_adapter *adapter = netdev_priv(netdev);
4368 struct net_device_stats *stats = &adapter->net_stats;
4369 struct ce_stats *devstat = &adapter->stats;
4370
4371 stats->rx_errors = devstat->rx_length_errs +
4372 devstat->rx_align_errs +
4373 devstat->rx_crc_errs +
4374 devstat->rx_code_violations +
4375 devstat->rx_other_errs;
4376 stats->tx_errors = devstat->tx_max_pkt_errs;
4377 stats->multicast = devstat->multicast_pkts_rcvd;
4378 stats->collisions = devstat->tx_collisions;
4379
4380 stats->rx_length_errors = devstat->rx_length_errs;
4381 stats->rx_over_errors = devstat->rx_overflows;
4382 stats->rx_crc_errors = devstat->rx_crc_errs;
4383
4384 /* NOTE: These stats don't have corresponding values in CE_STATS,
4385 * so we're going to have to update these directly from within the
4386 * TX/RX code
4387 */
4388 /* stats->rx_bytes = 20; devstat->; */
4389 /* stats->tx_bytes = 20; devstat->; */
4390 /* stats->rx_dropped = devstat->; */
4391 /* stats->tx_dropped = devstat->; */
4392
4393 /* NOTE: Not used, can't find analogous statistics */
4394 /* stats->rx_frame_errors = devstat->; */
4395 /* stats->rx_fifo_errors = devstat->; */
4396 /* stats->rx_missed_errors = devstat->; */
4397
4398 /* stats->tx_aborted_errors = devstat->; */
4399 /* stats->tx_carrier_errors = devstat->; */
4400 /* stats->tx_fifo_errors = devstat->; */
4401 /* stats->tx_heartbeat_errors = devstat->; */
4402 /* stats->tx_window_errors = devstat->; */
4403 return stats;
4404 }
4405
4406 /**
4407 * et131x_open - Open the device for use.
4408 * @netdev: device to be opened
4409 *
4410 * Returns 0 on success, errno on failure (as defined in errno.h)
4411 */
4412 static int et131x_open(struct net_device *netdev)
4413 {
4414 struct et131x_adapter *adapter = netdev_priv(netdev);
4415 struct pci_dev *pdev = adapter->pdev;
4416 unsigned int irq = pdev->irq;
4417 int result;
4418
4419 /* Start the timer to track NIC errors */
4420 init_timer(&adapter->error_timer);
4421 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4422 adapter->error_timer.function = et131x_error_timer_handler;
4423 adapter->error_timer.data = (unsigned long)adapter;
4424 add_timer(&adapter->error_timer);
4425
4426 result = request_irq(irq, et131x_isr,
4427 IRQF_SHARED, netdev->name, netdev);
4428 	if (result) {
4429 		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
		/* Don't leave the error timer running on failure */
		del_timer_sync(&adapter->error_timer);
4430 		return result;
4431 	}
4432
4433 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4434
4435 et131x_up(netdev);
4436
4437 return result;
4438 }
4439
4440 /**
4441 * et131x_close - Close the device
4442 * @netdev: device to be closed
4443 *
4444 * Returns 0 on success, errno on failure (as defined in errno.h)
4445 */
4446 static int et131x_close(struct net_device *netdev)
4447 {
4448 struct et131x_adapter *adapter = netdev_priv(netdev);
4449
4450 et131x_down(netdev);
4451
4452 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4453 free_irq(adapter->pdev->irq, netdev);
4454
4455 /* Stop the error timer */
4456 return del_timer_sync(&adapter->error_timer);
4457 }
4458
4459 /**
4460 * et131x_ioctl - The I/O Control handler for the driver
4461 * @netdev: device on which the control request is being made
4462 * @reqbuf: a pointer to the IOCTL request buffer
4463 * @cmd: the IOCTL command code
4464 *
4465 * Returns 0 on success, errno on failure (as defined in errno.h)
4466 */
4467 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4468 int cmd)
4469 {
4470 struct et131x_adapter *adapter = netdev_priv(netdev);
4471
4472 if (!adapter->phydev)
4473 return -EINVAL;
4474
4475 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4476 }
4477
4478 /**
4479 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4480 * @adapter: pointer to our private adapter structure
4481 *
4482 * FIXME: lot of dups with MAC code
4483 *
4484 * Returns 0 on success, errno on failure
4485 */
4486 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4487 {
4488 int filter = adapter->packet_filter;
4489 int status = 0;
4490 u32 ctrl;
4491 u32 pf_ctrl;
4492
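	/* In pf_ctrl, bit 0 is the broadcast filter, bit 1 the multicast
	 * filter and bit 2 the unicast (directed) filter; bit 2 of ctrl
	 * disables packet filtering altogether
	 */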
4493 ctrl = readl(&adapter->regs->rxmac.ctrl);
4494 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4495
4496 /* Default to disabled packet filtering. Enable it in the individual
4497 * case statements that require the device to filter something
4498 */
4499 ctrl |= 0x04;
4500
4501 /* Set us to be in promiscuous mode so we receive everything, this
4502 * is also true when we get a packet filter of 0
4503 */
4504 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4505 pf_ctrl &= ~7; /* Clear filter bits */
4506 else {
4507 /*
4508 * Set us up with Multicast packet filtering. Three cases are
4509 * possible - (1) we have a multi-cast list, (2) we receive ALL
4510 * multicast entries or (3) we receive none.
4511 */
4512 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4513 pf_ctrl &= ~2; /* Multicast filter bit */
4514 else {
4515 et1310_setup_device_for_multicast(adapter);
4516 pf_ctrl |= 2;
4517 ctrl &= ~0x04;
4518 }
4519
4520 /* Set us up with Unicast packet filtering */
4521 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4522 et1310_setup_device_for_unicast(adapter);
4523 pf_ctrl |= 4;
4524 ctrl &= ~0x04;
4525 }
4526
4527 /* Set us up with Broadcast packet filtering */
4528 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4529 pf_ctrl |= 1; /* Broadcast filter bit */
4530 ctrl &= ~0x04;
4531 } else
4532 pf_ctrl &= ~1;
4533
4534 /* Setup the receive mac configuration registers - Packet
4535 * Filter control + the enable / disable for packet filter
4536 * in the control reg.
4537 */
4538 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4539 writel(ctrl, &adapter->regs->rxmac.ctrl);
4540 }
4541 return status;
4542 }
4543
4544 /**
4545 * et131x_multicast - The handler to configure multicasting on the interface
4546 * @netdev: a pointer to a net_device struct representing the device
4547 */
4548 static void et131x_multicast(struct net_device *netdev)
4549 {
4550 struct et131x_adapter *adapter = netdev_priv(netdev);
4551 int packet_filter;
4552 unsigned long flags;
4553 struct netdev_hw_addr *ha;
4554 int i;
4555
4556 spin_lock_irqsave(&adapter->lock, flags);
4557
4558 /* Before we modify the platform-independent filter flags, store them
4559 * locally. This allows us to determine if anything's changed and if
4560 * we even need to bother the hardware
4561 */
4562 packet_filter = adapter->packet_filter;
4563
4564 /* Clear the 'multicast' flag locally; because we only have a single
4565 * flag to check multicast, and multiple multicast addresses can be
4566 * set, this is the easiest way to determine if more than one
4567 * multicast address is being set.
4568 */
4569 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4570
4571 /* Check the net_device flags and set the device independent flags
4572 * accordingly
4573 */
4574
4575 if (netdev->flags & IFF_PROMISC)
4576 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4577 else
4578 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4579
4580 if (netdev->flags & IFF_ALLMULTI)
4581 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4582
4583 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4584 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4585
4586 if (netdev_mc_count(netdev) < 1) {
4587 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4588 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4589 } else
4590 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4591
4592 /* Set values in the private adapter struct */
4593 i = 0;
4594 netdev_for_each_mc_addr(ha, netdev) {
4595 if (i == NIC_MAX_MCAST_LIST)
4596 break;
4597 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4598 }
4599 adapter->multicast_addr_count = i;
4600
4601 /* Are the new flags different from the previous ones? If not, then no
4602 * action is required
4603 *
4604 * NOTE - This block will always update the multicast_list with the
4605 * hardware, even if the addresses aren't the same.
4606 */
4607 if (packet_filter != adapter->packet_filter) {
4608 /* Call the device's filter function */
4609 et131x_set_packet_filter(adapter);
4610 }
4611 spin_unlock_irqrestore(&adapter->lock, flags);
4612 }
4613
4614 /**
4615 * et131x_tx - The handler to tx a packet on the device
4616 * @skb: data to be Tx'd
4617 * @netdev: device on which data is to be Tx'd
4618 *
4619 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY if resources are exhausted
4620 */
4621 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
4622 {
4623 int status = 0;
4624 struct et131x_adapter *adapter = netdev_priv(netdev);
4625
4626 /* stop the queue if it's getting full */
4627 if (adapter->tx_ring.used >= NUM_TCB - 1 &&
4628 !netif_queue_stopped(netdev))
4629 netif_stop_queue(netdev);
4630
4631 /* Save the timestamp for the TX timeout watchdog */
4632 netdev->trans_start = jiffies;
4633
4634 /* Call the device-specific data Tx routine */
4635 status = et131x_send_packets(skb, netdev);
4636
4637 /* Check status and manage the netif queue if necessary */
4638 if (status != 0) {
4639 if (status == -ENOMEM)
4640 status = NETDEV_TX_BUSY;
4641 else
4642 status = NETDEV_TX_OK;
4643 }
4644 return status;
4645 }
4646
4647 /**
4648 * et131x_tx_timeout - Timeout handler
4649 * @netdev: a pointer to a net_device struct representing the device
4650 *
4651 * The handler called when a Tx request times out. The timeout period is
4652 * specified by the 'watchdog_timeo' element in the net_device structure
4653 * (see et131x_pci_setup() to see how this value is set).
4654 */
4655 static void et131x_tx_timeout(struct net_device *netdev)
4656 {
4657 struct et131x_adapter *adapter = netdev_priv(netdev);
4658 struct tcb *tcb;
4659 unsigned long flags;
4660
4661 /* If the device is closed, ignore the timeout */
4662 	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
4663 return;
4664
4665 /* Any nonrecoverable hardware error?
4666 * Checks adapter->flags for any failure in phy reading
4667 */
4668 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
4669 return;
4670
4671 /* Hardware failure? */
4672 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
4673 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4674 return;
4675 }
4676
4677 /* Is send stuck? */
4678 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
4679
4680 tcb = adapter->tx_ring.send_head;
4681
4682 if (tcb != NULL) {
4683 tcb->count++;
4684
4685 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
4686 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
4687 flags);
4688
4689 dev_warn(&adapter->pdev->dev,
4690 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
4691 tcb->index,
4692 tcb->flags);
4693
4694 adapter->net_stats.tx_errors++;
4695
4696 /* perform reset of tx/rx */
4697 et131x_disable_txrx(netdev);
4698 et131x_enable_txrx(netdev);
4699 return;
4700 }
4701 }
4702
4703 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
4704 }
4705
4706 /**
4707 * et131x_change_mtu - The handler called to change the MTU for the device
4708 * @netdev: device whose MTU is to be changed
4709 * @new_mtu: the desired MTU
4710 *
4711 * Returns 0 on success, errno on failure (as defined in errno.h)
4712 */
4713 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4714 {
4715 int result = 0;
4716 struct et131x_adapter *adapter = netdev_priv(netdev);
4717
4718 /* Make sure the requested MTU is valid */
4719 if (new_mtu < 64 || new_mtu > 9216)
4720 return -EINVAL;
4721
4722 et131x_disable_txrx(netdev);
4723 et131x_handle_send_interrupt(adapter);
4724 et131x_handle_recv_interrupt(adapter);
4725
4726 /* Set the new MTU */
4727 netdev->mtu = new_mtu;
4728
4729 /* Free Rx DMA memory */
4730 et131x_adapter_memory_free(adapter);
4731
4732 /* Set the config parameter for Jumbo Packet support */
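	/* The extra 14 bytes account for the Ethernet header (ETH_HLEN) */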
4733 adapter->registry_jumbo_packet = new_mtu + 14;
4734 et131x_soft_reset(adapter);
4735
4736 /* Alloc and init Rx DMA memory */
4737 result = et131x_adapter_memory_alloc(adapter);
4738 if (result != 0) {
4739 dev_warn(&adapter->pdev->dev,
4740 "Change MTU failed; couldn't re-alloc DMA memory\n");
4741 return result;
4742 }
4743
4744 et131x_init_send(adapter);
4745
4746 et131x_hwaddr_init(adapter);
4747 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4748
4749 /* Init the device with the new settings */
4750 et131x_adapter_setup(adapter);
4751
4752 et131x_enable_txrx(netdev);
4753
4754 return result;
4755 }
4756
4757 /**
4758 * et131x_set_mac_addr - handler to change the MAC address for the device
4759 * @netdev: device whose MAC is to be changed
4760 * @new_mac: the desired MAC address
4761 *
4762 * Returns 0 on success, errno on failure (as defined in errno.h)
4763 *
4764 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
4765 */
4766 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
4767 {
4768 int result = 0;
4769 struct et131x_adapter *adapter = netdev_priv(netdev);
4770 struct sockaddr *address = new_mac;
4771
4774 if (adapter == NULL)
4775 return -ENODEV;
4776
4777 /* Make sure the requested MAC is valid */
4778 if (!is_valid_ether_addr(address->sa_data))
4779 return -EADDRNOTAVAIL;
4780
4781 et131x_disable_txrx(netdev);
4782 et131x_handle_send_interrupt(adapter);
4783 et131x_handle_recv_interrupt(adapter);
4784
4785 /* Set the new MAC */
4787
4788 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
4789
4790 netdev_info(netdev, "Setting MAC address to %pM\n",
4791 netdev->dev_addr);
4792
4793 /* Free Rx DMA memory */
4794 et131x_adapter_memory_free(adapter);
4795
4796 et131x_soft_reset(adapter);
4797
4798 /* Alloc and init Rx DMA memory */
4799 result = et131x_adapter_memory_alloc(adapter);
4800 if (result != 0) {
4801 dev_err(&adapter->pdev->dev,
4802 "Change MAC failed; couldn't re-alloc DMA memory\n");
4803 return result;
4804 }
4805
4806 et131x_init_send(adapter);
4807
4808 et131x_hwaddr_init(adapter);
4809
4810 /* Init the device with the new settings */
4811 et131x_adapter_setup(adapter);
4812
4813 et131x_enable_txrx(netdev);
4814
4815 return result;
4816 }
4817
4818 static const struct net_device_ops et131x_netdev_ops = {
4819 .ndo_open = et131x_open,
4820 .ndo_stop = et131x_close,
4821 .ndo_start_xmit = et131x_tx,
4822 .ndo_set_rx_mode = et131x_multicast,
4823 .ndo_tx_timeout = et131x_tx_timeout,
4824 .ndo_change_mtu = et131x_change_mtu,
4825 .ndo_set_mac_address = et131x_set_mac_addr,
4826 .ndo_validate_addr = eth_validate_addr,
4827 .ndo_get_stats = et131x_stats,
4828 .ndo_do_ioctl = et131x_ioctl,
4829 };
4830
4831 /**
4832 * et131x_pci_setup - Perform device initialization
4833 * @pdev: a pointer to the device's pci_dev structure
4834 * @ent: this device's entry in the pci_device_id table
4835 *
4836 * Returns 0 on success, errno on failure (as defined in errno.h)
4837 *
4838 * Registered in the pci_driver structure, this function is called when the
4839 * PCI subsystem finds a new PCI device which matches the information
4840 * contained in the pci_device_id table. This routine is the equivalent of
4841 * a device insertion routine.
4842 */
4843 static int et131x_pci_setup(struct pci_dev *pdev,
4844 const struct pci_device_id *ent)
4845 {
4846 struct net_device *netdev;
4847 struct et131x_adapter *adapter;
4848 int rc;
4849 int ii;
4850
4851 rc = pci_enable_device(pdev);
4852 if (rc < 0) {
4853 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4854 goto out;
4855 }
4856
4857 /* Perform some basic PCI checks */
4858 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4859 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4860 rc = -ENODEV;
4861 goto err_disable;
4862 }
4863
4864 rc = pci_request_regions(pdev, DRIVER_NAME);
4865 if (rc < 0) {
4866 dev_err(&pdev->dev, "Can't get PCI resources\n");
4867 goto err_disable;
4868 }
4869
4870 pci_set_master(pdev);
4871
4872 /* Check the DMA addressing support of this device */
4873 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4874 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4875 if (rc < 0) {
4876 dev_err(&pdev->dev,
4877 "Unable to obtain 64 bit DMA for consistent allocations\n");
4878 goto err_release_res;
4879 }
4880 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4881 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4882 if (rc < 0) {
4883 dev_err(&pdev->dev,
4884 "Unable to obtain 32 bit DMA for consistent allocations\n");
4885 goto err_release_res;
4886 }
4887 } else {
4888 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4889 rc = -EIO;
4890 goto err_release_res;
4891 }
4892
4893 /* Allocate netdev and private adapter structs */
4894 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
4895 if (!netdev) {
4896 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4897 rc = -ENOMEM;
4898 goto err_release_res;
4899 }
4900
4901 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
4902 netdev->netdev_ops = &et131x_netdev_ops;
4903
4904 SET_NETDEV_DEV(netdev, &pdev->dev);
4905 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
4906
4907 adapter = et131x_adapter_init(netdev, pdev);
4908
4909 rc = et131x_pci_init(adapter, pdev);
4910 if (rc < 0)
4911 goto err_free_dev;
4912
4913 /* Map the bus-relative registers to system virtual memory */
4914 adapter->regs = pci_ioremap_bar(pdev, 0);
4915 if (!adapter->regs) {
4916 dev_err(&pdev->dev, "Cannot map device registers\n");
4917 rc = -ENOMEM;
4918 goto err_free_dev;
4919 }
4920
4921 /* If Phy COMA mode was enabled when we went down, disable it here. */
4922 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4923
4924 /* Issue a global reset to the et1310 */
4925 et131x_soft_reset(adapter);
4926
4927 /* Disable all interrupts (paranoid) */
4928 et131x_disable_interrupts(adapter);
4929
4930 /* Allocate DMA memory */
4931 rc = et131x_adapter_memory_alloc(adapter);
4932 if (rc < 0) {
4933 dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
4934 goto err_iounmap;
4935 }
4936
4937 /* Init send data structures */
4938 et131x_init_send(adapter);
4939
4940 /* Set up the task structure for the ISR's deferred handler */
4941 INIT_WORK(&adapter->task, et131x_isr_handler);
4942
4943 /* Copy address into the net_device struct */
4944 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4945
4946 /* Init variable for counting how long we do not have link status */
4947 adapter->boot_coma = 0;
4948 et1310_disable_phy_coma(adapter);
4949
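	/* Preset the error code for the allocation failures below */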
4950 rc = -ENOMEM;
4951
4952 /* Setup the mii_bus struct */
4953 adapter->mii_bus = mdiobus_alloc();
4954 if (!adapter->mii_bus) {
4955 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4956 goto err_mem_free;
4957 }
4958
4959 adapter->mii_bus->name = "et131x_eth_mii";
4960 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4961 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4962 adapter->mii_bus->priv = netdev;
4963 adapter->mii_bus->read = et131x_mdio_read;
4964 adapter->mii_bus->write = et131x_mdio_write;
4965 adapter->mii_bus->reset = et131x_mdio_reset;
4966 adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
4967 if (!adapter->mii_bus->irq) {
4968 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
4969 goto err_mdio_free;
4970 }
4971
4972 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4973 adapter->mii_bus->irq[ii] = PHY_POLL;
4974
4975 rc = mdiobus_register(adapter->mii_bus);
4976 if (rc < 0) {
4977 dev_err(&pdev->dev, "failed to register MII bus\n");
4978 goto err_mdio_free_irq;
4979 }
4980
4981 rc = et131x_mii_probe(netdev);
4982 if (rc < 0) {
4983 dev_err(&pdev->dev, "failed to probe MII bus\n");
4984 goto err_mdio_unregister;
4985 }
4986
4987 /* Setup et1310 as per the documentation */
4988 et131x_adapter_setup(adapter);
4989
4990 /* We can enable interrupts now
4991 *
4992 * NOTE - Because registration of interrupt handler is done in the
4993 * device's open(), defer enabling device interrupts to that
4994 * point
4995 */
4996
4997 /* Register the net_device struct with the Linux network layer */
4998 rc = register_netdev(netdev);
4999 if (rc < 0) {
5000 dev_err(&pdev->dev, "register_netdev() failed\n");
5001 goto err_phy_disconnect;
5002 }
5003
5004 	/* Stash the net_device struct in the PCI driver data so that the
5005 	 * other driver entry points (remove, suspend, resume) can retrieve
5006 	 * it from the pci_dev later
5007 	 */
5008 pci_set_drvdata(pdev, netdev);
5009 out:
5010 return rc;
5011
5012 err_phy_disconnect:
5013 phy_disconnect(adapter->phydev);
5014 err_mdio_unregister:
5015 mdiobus_unregister(adapter->mii_bus);
5016 err_mdio_free_irq:
5017 kfree(adapter->mii_bus->irq);
5018 err_mdio_free:
5019 mdiobus_free(adapter->mii_bus);
5020 err_mem_free:
5021 et131x_adapter_memory_free(adapter);
5022 err_iounmap:
5023 iounmap(adapter->regs);
5024 err_free_dev:
5025 pci_dev_put(pdev);
5026 free_netdev(netdev);
5027 err_release_res:
5028 pci_release_regions(pdev);
5029 err_disable:
5030 pci_disable_device(pdev);
5031 goto out;
5032 }
5033
5034 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5035 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5036 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5037 {0,}
5038 };
5039 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5040
5041 static struct pci_driver et131x_driver = {
5042 .name = DRIVER_NAME,
5043 .id_table = et131x_pci_table,
5044 .probe = et131x_pci_setup,
5045 .remove = et131x_pci_remove,
5046 .driver.pm = ET131X_PM_OPS,
5047 };
5048
5049 module_pci_driver(et131x_driver);