defconfig: exynos9610: Re-add dropped Wi-Fi AP options lost
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
c6e970a0 32#include <linux/phy.h>
55d7de9d 33#include <net/ip6_checksum.h>
cc89c323
WH
34#include <linux/interrupt.h>
35#include <linux/irqdomain.h>
36#include <linux/irq.h>
37#include <linux/irqchip/chained_irq.h>
bdfba55e 38#include <linux/microchipphy.h>
8c56ea41 39#include <linux/phy.h>
a4977f3e 40#include <linux/of_net.h>
55d7de9d
WH
41#include "lan78xx.h"
42
43#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
44#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
45#define DRIVER_NAME "lan78xx"
02dc1f3d 46#define DRIVER_VERSION "1.0.6"
55d7de9d
WH
47
48#define TX_TIMEOUT_JIFFIES (5 * HZ)
49#define THROTTLE_JIFFIES (HZ / 8)
50#define UNLINK_TIMEOUT_MS 3
51
52#define RX_MAX_QUEUE_MEMORY (60 * 1518)
53
54#define SS_USB_PKT_SIZE (1024)
55#define HS_USB_PKT_SIZE (512)
56#define FS_USB_PKT_SIZE (64)
57
58#define MAX_RX_FIFO_SIZE (12 * 1024)
59#define MAX_TX_FIFO_SIZE (12 * 1024)
60#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
61#define DEFAULT_BULK_IN_DELAY (0x0800)
62#define MAX_SINGLE_PACKET_SIZE (9000)
63#define DEFAULT_TX_CSUM_ENABLE (true)
64#define DEFAULT_RX_CSUM_ENABLE (true)
65#define DEFAULT_TSO_CSUM_ENABLE (true)
66#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
67#define TX_OVERHEAD (8)
68#define RXW_PADDING 2
69
70#define LAN78XX_USB_VENDOR_ID (0x0424)
71#define LAN7800_USB_PRODUCT_ID (0x7800)
72#define LAN7850_USB_PRODUCT_ID (0x7850)
02dc1f3d 73#define LAN7801_USB_PRODUCT_ID (0x7801)
55d7de9d
WH
74#define LAN78XX_EEPROM_MAGIC (0x78A5)
75#define LAN78XX_OTP_MAGIC (0x78F3)
76
77#define MII_READ 1
78#define MII_WRITE 0
79
80#define EEPROM_INDICATOR (0xA5)
81#define EEPROM_MAC_OFFSET (0x01)
82#define MAX_EEPROM_SIZE 512
83#define OTP_INDICATOR_1 (0xF3)
84#define OTP_INDICATOR_2 (0xF7)
85
86#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
87 WAKE_MCAST | WAKE_BCAST | \
88 WAKE_ARP | WAKE_MAGIC)
89
90/* USB related defines */
91#define BULK_IN_PIPE 1
92#define BULK_OUT_PIPE 2
93
94/* default autosuspend delay (mSec)*/
95#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
96
20ff5565
WH
97/* statistic update interval (mSec) */
98#define STAT_UPDATE_TIMER (1 * 1000)
99
cc89c323
WH
100/* defines interrupts from interrupt EP */
101#define MAX_INT_EP (32)
102#define INT_EP_INTEP (31)
103#define INT_EP_OTP_WR_DONE (28)
104#define INT_EP_EEE_TX_LPI_START (26)
105#define INT_EP_EEE_TX_LPI_STOP (25)
106#define INT_EP_EEE_RX_LPI (24)
107#define INT_EP_MAC_RESET_TIMEOUT (23)
108#define INT_EP_RDFO (22)
109#define INT_EP_TXE (21)
110#define INT_EP_USB_STATUS (20)
111#define INT_EP_TX_DIS (19)
112#define INT_EP_RX_DIS (18)
113#define INT_EP_PHY (17)
114#define INT_EP_DP (16)
115#define INT_EP_MAC_ERR (15)
116#define INT_EP_TDFU (14)
117#define INT_EP_TDFO (13)
118#define INT_EP_UTX (12)
119#define INT_EP_GPIO_11 (11)
120#define INT_EP_GPIO_10 (10)
121#define INT_EP_GPIO_9 (9)
122#define INT_EP_GPIO_8 (8)
123#define INT_EP_GPIO_7 (7)
124#define INT_EP_GPIO_6 (6)
125#define INT_EP_GPIO_5 (5)
126#define INT_EP_GPIO_4 (4)
127#define INT_EP_GPIO_3 (3)
128#define INT_EP_GPIO_2 (2)
129#define INT_EP_GPIO_1 (1)
130#define INT_EP_GPIO_0 (0)
131
55d7de9d
WH
/* ethtool -S statistic names.  The order must match the member order of
 * struct lan78xx_statstage / lan78xx_statstage64, since the counters are
 * copied out positionally.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
181
/* Raw 32-bit hardware statistics block, exactly as returned by the
 * USB_VENDOR_REQUEST_GET_STATS control read (little-endian on the wire,
 * converted in lan78xx_read_stats).  Member order must match both
 * lan78xx_gstrings and struct lan78xx_statstage64 — the code aliases
 * these structs as flat u32/u64 arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
231
20ff5565
WH
/* 64-bit accumulated statistics (raw value plus rollover compensation,
 * see lan78xx_update_stats).  Must declare the same members in the same
 * order as struct lan78xx_statstage.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
281
55d7de9d
WH
282struct lan78xx_net;
283
/* Driver-private receive-filter state, hung off lan78xx_net::data[0].
 * Filter tables are staged here under rfe_ctl_lock and flushed to the
 * device from the deferred work items (register writes must sleep).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;		/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast table write */
	struct work_struct set_vlan;	  /* deferred VLAN table write */
	u32 wol;		/* Wake-on-LAN option flags */
};
296
/* Lifecycle state of an skb queued through the driver (kept in skb->cb
 * via struct skb_data).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
306
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current queue/URB state */
	size_t length;		/* payload length in bytes */
	int num_of_packet;	/* frames aggregated in this transfer */
};
314
/* Context passed with asynchronous control requests. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
319
320#define EVENT_TX_HALT 0
321#define EVENT_RX_HALT 1
322#define EVENT_RX_MEMORY 2
323#define EVENT_STS_SPLIT 3
324#define EVENT_LINK_RESET 4
325#define EVENT_RX_PAUSED 5
326#define EVENT_DEV_WAKING 6
327#define EVENT_DEV_ASLEEP 7
328#define EVENT_DEV_OPEN 8
20ff5565
WH
329#define EVENT_STAT_UPDATE 9
330
/* Statistics bookkeeping: last raw snapshot, per-counter rollover
 * counts/maxima, and the accumulated 64-bit totals.  All fields are
 * accessed under access_lock (see lan78xx_update_stats).
 */
struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;		/* snapshot from last poll */
	struct lan78xx_statstage rollover_count; /* wraps seen per counter */
	struct lan78xx_statstage rollover_max;	/* max value before wrap */
	struct lan78xx_statstage64 curr_stat;	/* accumulated 64-bit stats */
};
55d7de9d 338
cc89c323
WH
/* State for the chip's interrupt-EP-backed irq domain (PHY interrupt
 * delivery).
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;		/* virq assigned to the PHY */
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;			/* enabled-source bitmap */
	struct mutex irq_lock;		/* for irq bus access */
};
347
55d7de9d
WH
/* Per-device driver state (netdev_priv of the net_device). */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;		/* points at struct lan78xx_priv */

	int rx_qlen;			/* rx URB queue depth */
	int tx_qlen;			/* tx URB queue depth */
	struct sk_buff_head rxq;	/* rx URBs in flight */
	struct sk_buff_head txq;	/* tx URBs in flight */
	struct sk_buff_head done;	/* completed, awaiting bh */
	struct sk_buff_head rxq_pause;	/* rx deferred while paused */
	struct sk_buff_head txq_pend;	/* tx waiting for a free URB */

	struct tasklet_struct bh;	/* completion bottom half */
	struct delayed_work wq;		/* deferred event handling */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;			/* netif_msg verbosity mask */

	struct urb *urb_intr;		/* interrupt endpoint URB */
	struct usb_anchor deferred;	/* URBs held while suspended */

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;	/* throttled-resubmit timer */
	struct timer_list stat_monitor;	/* periodic stats polling */

	unsigned long data[5];		/* data[0] = struct lan78xx_priv * */

	int link_on;			/* last observed link state */
	u8 mdix_ctrl;

	u32 chipid;			/* from ID_REV register */
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;			/* autonegotiate flow control? */
	u8 fc_request_control;		/* forced FLOW_CTRL_* when not */

	int delta;			/* stat_monitor interval factor */
	struct statstage stats;

	struct irq_domain_data domain_data;
};
407
02dc1f3d
WH
408/* define external phy id */
409#define PHY_LAN8835 (0x0007C130)
410#define PHY_KSZ9031RNX (0x00221620)
411
55d7de9d
WH
412/* use ethtool to change the level for any given device */
413static int msg_level = -1;
414module_param(msg_level, int, 0);
415MODULE_PARM_DESC(msg_level, "Override default message level");
416
417static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
418{
419 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
420 int ret;
421
55d7de9d
WH
422 if (!buf)
423 return -ENOMEM;
424
425 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
426 USB_VENDOR_REQUEST_READ_REGISTER,
427 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
428 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
429 if (likely(ret >= 0)) {
430 le32_to_cpus(buf);
431 *data = *buf;
432 } else {
433 netdev_warn(dev->net,
434 "Failed to read register index 0x%08x. ret = %d",
435 index, ret);
436 }
437
438 kfree(buf);
439
440 return ret;
441}
442
443static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
444{
445 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
446 int ret;
447
55d7de9d
WH
448 if (!buf)
449 return -ENOMEM;
450
451 *buf = data;
452 cpu_to_le32s(buf);
453
454 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
455 USB_VENDOR_REQUEST_WRITE_REGISTER,
456 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
457 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
458 if (unlikely(ret < 0)) {
459 netdev_warn(dev->net,
460 "Failed to write register index 0x%08x. ret = %d",
461 index, ret);
462 }
463
464 kfree(buf);
465
466 return ret;
467}
468
/* Fetch the whole hardware statistics block with one vendor control
 * read, converting each little-endian 32-bit counter into @data.
 * Returns the usb_control_msg() result: number of bytes transferred
 * (> 0) on success, negative errno on failure.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* heap bounce buffer for the USB transfer */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT); /* NOTE(review): SET timeout
						      * used for an IN transfer;
						      * presumably same value as
						      * GET — confirm */
	if (likely(ret >= 0)) {
		/* treat both structs as flat u32 arrays; relies on
		 * lan78xx_statstage being all-u32 with no padding
		 */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
507
20ff5565
WH
508#define check_counter_rollover(struct1, dev_stats, member) { \
509 if (struct1->member < dev_stats.saved.member) \
510 dev_stats.rollover_count.member++; \
511 }
512
513static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
514 struct lan78xx_statstage *stats)
515{
516 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
517 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
518 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
519 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
520 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
521 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
522 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
523 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
524 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
525 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
526 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
527 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
528 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
529 check_counter_rollover(stats, dev->stats, rx_pause_frames);
530 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
531 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
532 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
533 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
534 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
535 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
536 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
537 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
538 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
539 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
540 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
541 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
542 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
543 check_counter_rollover(stats, dev->stats, tx_single_collisions);
544 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
545 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
546 check_counter_rollover(stats, dev->stats, tx_late_collisions);
547 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
548 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
549 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
550 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
551 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
552 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
553 check_counter_rollover(stats, dev->stats, tx_pause_frames);
554 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
555 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
556 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
557 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
558 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
559 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
560 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
561 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
562 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
563
564 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
565}
566
/* Poll the hardware counters and refresh the accumulated 64-bit totals:
 *   curr_stat = latest_raw + rollover_count * (rollover_max + 1)
 *
 * The four stat structures are aliased as flat u32/u64 arrays; this
 * relies on lan78xx_statstage and lan78xx_statstage64 declaring the
 * same members in the same order.  Silently returns if the interface
 * cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* read_stats returns bytes transferred (> 0) on success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
594
55d7de9d
WH
595/* Loop until the read is completed with timeout called with phy_mutex held */
596static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
597{
598 unsigned long start_time = jiffies;
599 u32 val;
600 int ret;
601
602 do {
603 ret = lan78xx_read_reg(dev, MII_ACC, &val);
604 if (unlikely(ret < 0))
605 return -EIO;
606
607 if (!(val & MII_ACC_MII_BUSY_))
608 return 0;
609 } while (!time_after(jiffies, start_time + HZ));
610
611 return -EIO;
612}
613
614static inline u32 mii_access(int id, int index, int read)
615{
616 u32 ret;
617
618 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
619 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
620 if (read)
621 ret |= MII_ACC_MII_READ_;
622 else
623 ret |= MII_ACC_MII_WRITE_;
624 ret |= MII_ACC_MII_BUSY_;
625
626 return ret;
627}
628
55d7de9d
WH
629static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
630{
631 unsigned long start_time = jiffies;
632 u32 val;
633 int ret;
634
635 do {
636 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
637 if (unlikely(ret < 0))
638 return -EIO;
639
640 if (!(val & E2P_CMD_EPC_BUSY_) ||
641 (val & E2P_CMD_EPC_TIMEOUT_))
642 break;
643 usleep_range(40, 100);
644 } while (!time_after(jiffies, start_time + HZ));
645
646 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
647 netdev_warn(dev->net, "EEPROM read operation timeout");
648 return -EIO;
649 }
650
651 return 0;
652}
653
654static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
655{
656 unsigned long start_time = jiffies;
657 u32 val;
658 int ret;
659
660 do {
661 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
662 if (unlikely(ret < 0))
663 return -EIO;
664
665 if (!(val & E2P_CMD_EPC_BUSY_))
666 return 0;
667
668 usleep_range(40, 100);
669 } while (!time_after(jiffies, start_time + HZ));
670
671 netdev_warn(dev->net, "EEPROM is busy");
672 return -EIO;
673}
674
675static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
676 u32 length, u8 *data)
677{
678 u32 val;
a0db7d10 679 u32 saved;
55d7de9d 680 int i, ret;
a0db7d10
WH
681 int retval;
682
683 /* depends on chip, some EEPROM pins are muxed with LED function.
684 * disable & restore LED function to access EEPROM.
685 */
686 ret = lan78xx_read_reg(dev, HW_CFG, &val);
687 saved = val;
87177ba6 688 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
689 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
690 ret = lan78xx_write_reg(dev, HW_CFG, val);
691 }
55d7de9d 692
a0db7d10
WH
693 retval = lan78xx_eeprom_confirm_not_busy(dev);
694 if (retval)
695 return retval;
55d7de9d
WH
696
697 for (i = 0; i < length; i++) {
698 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
699 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
700 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
701 if (unlikely(ret < 0)) {
702 retval = -EIO;
703 goto exit;
704 }
55d7de9d 705
a0db7d10
WH
706 retval = lan78xx_wait_eeprom(dev);
707 if (retval < 0)
708 goto exit;
55d7de9d
WH
709
710 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
711 if (unlikely(ret < 0)) {
712 retval = -EIO;
713 goto exit;
714 }
55d7de9d
WH
715
716 data[i] = val & 0xFF;
717 offset++;
718 }
719
a0db7d10
WH
720 retval = 0;
721exit:
87177ba6 722 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
723 ret = lan78xx_write_reg(dev, HW_CFG, saved);
724
725 return retval;
55d7de9d
WH
726}
727
728static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
729 u32 length, u8 *data)
730{
731 u8 sig;
732 int ret;
733
734 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
735 if ((ret == 0) && (sig == EEPROM_INDICATOR))
736 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
737 else
738 ret = -EINVAL;
739
740 return ret;
741}
742
/* Write @length bytes to the external EEPROM starting at @offset.
 *
 * Sequence per the E2P_CMD interface: disable LED pin-mux (LAN7800),
 * wait for idle, issue write/erase-enable (EWEN), then for each byte
 * load E2P_DATA and issue a WRITE command, waiting for completion each
 * time.  The saved HW_CFG (LED) value is restored on every exit path.
 * Returns 0 on success, negative errno on failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the original LED configuration */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
809
/* Read @length bytes from OTP memory starting at @offset.
 *
 * Powers up the OTP block if needed (OTP_PWR_DN cleared and polled),
 * then for each byte programs the split address (high bits in
 * OTP_ADDR1, low bits in OTP_ADDR2), issues a READ + GO, polls
 * OTP_STATUS until not busy, and reads the byte from OTP_RD_DATA.
 * Returns 0 on success, -EIO on poll timeout (~1 s per step).
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
863
9fb6066d
WH
/* Program @length bytes of OTP memory starting at @offset.
 *
 * Mirrors lan78xx_read_raw_otp: power up the OTP block if needed, then
 * select BYTE program mode and, per byte, set the split address, load
 * OTP_PRGM_DATA, issue program-and-verify (OTP_TST_CMD_PRGVRFY_) + GO,
 * and poll OTP_STATUS until idle.  Returns 0 on success, -EIO on poll
 * timeout.  NOTE: OTP bits can typically only be programmed once —
 * callers guard this with the OTP_INDICATOR signature check.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address is split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
916
55d7de9d
WH
917static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
918 u32 length, u8 *data)
919{
920 u8 sig;
921 int ret;
922
923 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
924
925 if (ret == 0) {
926 if (sig == OTP_INDICATOR_1)
927 offset = offset;
928 else if (sig == OTP_INDICATOR_2)
929 offset += 0x100;
930 else
931 ret = -EINVAL;
c0e0cd65
PE
932 if (!ret)
933 ret = lan78xx_read_raw_otp(dev, offset, length, data);
55d7de9d
WH
934 }
935
936 return ret;
937}
938
939static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
940{
941 int i, ret;
942
943 for (i = 0; i < 100; i++) {
944 u32 dp_sel;
945
946 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
947 if (unlikely(ret < 0))
948 return -EIO;
949
950 if (dp_sel & DP_SEL_DPRDY_)
951 return 0;
952
953 usleep_range(40, 100);
954 }
955
956 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
957
958 return -EIO;
959}
960
/* Write @length words from @buf into internal RAM selected by
 * @ram_select, starting at @addr, via the DP_* dataport registers.
 * Serialized by pdata->dataport_mutex; each word write is confirmed by
 * polling for DPRDY.  Returns 0 on success, -EIO on dataport timeout.
 *
 * NOTE(review): a usb_autopm_get_interface() failure returns 0 without
 * writing anything — callers cannot distinguish that from success;
 * confirm this best-effort behavior is intended.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1001
1002static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1003 int index, u8 addr[ETH_ALEN])
1004{
1005 u32 temp;
1006
1007 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1008 temp = addr[3];
1009 temp = addr[2] | (temp << 8);
1010 temp = addr[1] | (temp << 8);
1011 temp = addr[0] | (temp << 8);
1012 pdata->pfilter_table[index][1] = temp;
1013 temp = addr[5];
1014 temp = addr[4] | (temp << 8);
1015 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1016 pdata->pfilter_table[index][0] = temp;
1017 }
1018}
1019
1020/* returns hash bit number for given MAC address */
1021static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1022{
1023 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1024}
1025
/* Work-item body: flush the shadow receive-filter state (built by
 * lan78xx_set_multicast under a spinlock) to the device.  Runs in
 * process context because the dataport/register writes may sleep.
 * Writes the multicast hash table, the perfect-filter slots (slot 0,
 * the device's own address, is skipped), and finally RFE_CTL.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* invalidate the slot before rewriting it */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1050
/* ndo_set_rx_mode callback: rebuild the shadow receive-filter state
 * from the netdev flags and multicast list.  Only the in-memory tables
 * are updated here, under rfe_ctl_lock; the actual (sleeping) register
 * writes are deferred to the set_multicast work item.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: no unicast/multicast filtering modes */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow: fall back to the hash filter */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1113
1114static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1115 u16 lcladv, u16 rmtadv)
1116{
1117 u32 flow = 0, fct_flow = 0;
1118 int ret;
349e0c5e 1119 u8 cap;
55d7de9d 1120
349e0c5e
WH
1121 if (dev->fc_autoneg)
1122 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1123 else
1124 cap = dev->fc_request_control;
55d7de9d
WH
1125
1126 if (cap & FLOW_CTRL_TX)
349e0c5e 1127 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
55d7de9d
WH
1128
1129 if (cap & FLOW_CTRL_RX)
1130 flow |= FLOW_CR_RX_FCEN_;
1131
1132 if (dev->udev->speed == USB_SPEED_SUPER)
1133 fct_flow = 0x817;
1134 else if (dev->udev->speed == USB_SPEED_HIGH)
1135 fct_flow = 0x211;
1136
1137 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1138 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1139 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1140
1141 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1142
1143 /* threshold value should be set before enabling flow */
1144 ret = lan78xx_write_reg(dev, FLOW, flow);
1145
1146 return 0;
1147}
1148
/* Handle a PHY interrupt: detect link up/down transitions, reset the
 * MAC on link loss and reconfigure USB LPM / flow control on link up.
 * Returns 0 or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no link: stop refreshing the hardware statistics */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* at SuperSpeed, U2 power state is only allowed below 1G */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart the periodic statistics refresh */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1226
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	/* record which event is pending, then kick the deferred worker */
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1238
1239static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1240{
1241 u32 intdata;
1242
1243 if (urb->actual_length != 4) {
1244 netdev_warn(dev->net,
1245 "unexpected urb length %d", urb->actual_length);
1246 return;
1247 }
1248
1249 memcpy(&intdata, urb->transfer_buffer, 4);
1250 le32_to_cpus(&intdata);
1251
1252 if (intdata & INT_ENP_PHY_INT) {
1253 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1254 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1255
1256 if (dev->domain_data.phyirq > 0)
1257 generic_handle_irq(dev->domain_data.phyirq);
55d7de9d
WH
1258 } else
1259 netdev_warn(dev->net,
1260 "unexpected interrupt: 0x%08x\n", intdata);
1261}
1262
/* ethtool: report the size of the EEPROM exposed via get/set_eeprom. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1267
1268static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1269 struct ethtool_eeprom *ee, u8 *data)
1270{
1271 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1272 int ret;
1273
1274 ret = usb_autopm_get_interface(dev->intf);
1275 if (ret)
1276 return ret;
55d7de9d
WH
1277
1278 ee->magic = LAN78XX_EEPROM_MAGIC;
1279
8a7ffeb7
NS
1280 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1281
1282 usb_autopm_put_interface(dev->intf);
1283
1284 return ret;
55d7de9d
WH
1285}
1286
1287static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1288 struct ethtool_eeprom *ee, u8 *data)
1289{
1290 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1291 int ret;
1292
1293 ret = usb_autopm_get_interface(dev->intf);
1294 if (ret)
1295 return ret;
55d7de9d 1296
c0776822
NS
1297 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1298 * to load data from EEPROM
1299 */
1300 if (ee->magic == LAN78XX_EEPROM_MAGIC)
8a7ffeb7 1301 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
55d7de9d
WH
1302 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1303 (ee->offset == 0) &&
1304 (ee->len == 512) &&
1305 (data[0] == OTP_INDICATOR_1))
8a7ffeb7 1306 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d 1307
8a7ffeb7
NS
1308 usb_autopm_put_interface(dev->intf);
1309
1310 return ret;
55d7de9d
WH
1311}
1312
/* ethtool: copy out the statistics name table for ETH_SS_STATS. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1319
1320static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1321{
1322 if (sset == ETH_SS_STATS)
1323 return ARRAY_SIZE(lan78xx_gstrings);
1324 else
1325 return -EOPNOTSUPP;
1326}
1327
/* ethtool: refresh hardware counters and copy a consistent snapshot. */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	/* curr_stat is also updated asynchronously; hold the lock so the
	 * copy is consistent
	 */
	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1339
1340static void lan78xx_get_wol(struct net_device *netdev,
1341 struct ethtool_wolinfo *wol)
1342{
1343 struct lan78xx_net *dev = netdev_priv(netdev);
1344 int ret;
1345 u32 buf;
1346 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1347
1348 if (usb_autopm_get_interface(dev->intf) < 0)
1349 return;
1350
1351 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1352 if (unlikely(ret < 0)) {
1353 wol->supported = 0;
1354 wol->wolopts = 0;
1355 } else {
1356 if (buf & USB_CFG_RMT_WKP_) {
1357 wol->supported = WAKE_ALL;
1358 wol->wolopts = pdata->wol;
1359 } else {
1360 wol->supported = 0;
1361 wol->wolopts = 0;
1362 }
1363 }
1364
1365 usb_autopm_put_interface(dev->intf);
1366}
1367
1368static int lan78xx_set_wol(struct net_device *netdev,
1369 struct ethtool_wolinfo *wol)
1370{
1371 struct lan78xx_net *dev = netdev_priv(netdev);
1372 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1373 int ret;
1374
1375 ret = usb_autopm_get_interface(dev->intf);
1376 if (ret < 0)
1377 return ret;
1378
9f0962c0
FF
1379 if (wol->wolopts & ~WAKE_ALL)
1380 return -EINVAL;
1381
1382 pdata->wol = wol->wolopts;
55d7de9d
WH
1383
1384 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1385
ce85e13a
WH
1386 phy_ethtool_set_wol(netdev->phydev, wol);
1387
55d7de9d
WH
1388 usb_autopm_put_interface(dev->intf);
1389
1390 return ret;
1391}
1392
/* ethtool: report Energy-Efficient-Ethernet state from PHY and MAC. */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both link partners advertise it */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		/* EEE disabled in the MAC: report everything off */
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1430
1431static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1432{
1433 struct lan78xx_net *dev = netdev_priv(net);
1434 int ret;
1435 u32 buf;
1436
1437 ret = usb_autopm_get_interface(dev->intf);
1438 if (ret < 0)
1439 return ret;
1440
1441 if (edata->eee_enabled) {
1442 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1443 buf |= MAC_CR_EEE_EN_;
1444 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1445
ce85e13a
WH
1446 phy_ethtool_set_eee(net->phydev, edata);
1447
1448 buf = (u32)edata->tx_lpi_timer;
1449 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1450 } else {
1451 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1452 buf &= ~MAC_CR_EEE_EN_;
1453 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1454 }
1455
1456 usb_autopm_put_interface(dev->intf);
1457
1458 return 0;
1459}
1460
/* ethtool: refresh the PHY status and return the link state (1 = up). */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1467
55d7de9d
WH
1468static void lan78xx_get_drvinfo(struct net_device *net,
1469 struct ethtool_drvinfo *info)
1470{
1471 struct lan78xx_net *dev = netdev_priv(net);
1472
1473 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1474 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1475 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1476}
1477
/* ethtool: return the driver's message-level bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1484
/* ethtool: set the driver's message-level bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1491
6e76510e
PR
1492static int lan78xx_get_link_ksettings(struct net_device *net,
1493 struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1494{
1495 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1496 struct phy_device *phydev = net->phydev;
55d7de9d 1497 int ret;
55d7de9d 1498
55d7de9d
WH
1499 ret = usb_autopm_get_interface(dev->intf);
1500 if (ret < 0)
1501 return ret;
1502
5514174f 1503 phy_ethtool_ksettings_get(phydev, cmd);
55d7de9d 1504
55d7de9d
WH
1505 usb_autopm_put_interface(dev->intf);
1506
1507 return ret;
1508}
1509
6e76510e
PR
/* ethtool: apply new link settings via phylib.
 * When autoneg is off, the link is bounced (BMCR loopback pulse) so the
 * MAC re-latches the newly forced speed/duplex.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1537
349e0c5e
WH
1538static void lan78xx_get_pause(struct net_device *net,
1539 struct ethtool_pauseparam *pause)
1540{
1541 struct lan78xx_net *dev = netdev_priv(net);
1542 struct phy_device *phydev = net->phydev;
6e76510e 1543 struct ethtool_link_ksettings ecmd;
349e0c5e 1544
6e76510e 1545 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
1546
1547 pause->autoneg = dev->fc_autoneg;
1548
1549 if (dev->fc_request_control & FLOW_CTRL_TX)
1550 pause->tx_pause = 1;
1551
1552 if (dev->fc_request_control & FLOW_CTRL_RX)
1553 pause->rx_pause = 1;
1554}
1555
/* ethtool: configure pause (flow control) parameters.
 * Returns -EINVAL when pause autoneg is requested while link autoneg
 * is disabled on the PHY.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		/* translate the request into pause advertisement bits and
		 * let autonegotiation carry it to the link partner
		 */
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1601
55d7de9d
WH
/* ethtool callback table installed on the lan78xx net device. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1623
/* ndo_do_ioctl: forward MII ioctls to the PHY while the iface is up. */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1631
1632static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1633{
1634 u32 addr_lo, addr_hi;
1635 int ret;
1636 u8 addr[6];
1637
1638 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1639 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1640
1641 addr[0] = addr_lo & 0xFF;
1642 addr[1] = (addr_lo >> 8) & 0xFF;
1643 addr[2] = (addr_lo >> 16) & 0xFF;
1644 addr[3] = (addr_lo >> 24) & 0xFF;
1645 addr[4] = addr_hi & 0xFF;
1646 addr[5] = (addr_hi >> 8) & 0xFF;
1647
1648 if (!is_valid_ether_addr(addr)) {
a4977f3e
PE
1649 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1650 /* valid address present in Device Tree */
1651 netif_dbg(dev, ifup, dev->net,
1652 "MAC address read from Device Tree");
1653 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1654 ETH_ALEN, addr) == 0) ||
1655 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1656 ETH_ALEN, addr) == 0)) &&
1657 is_valid_ether_addr(addr)) {
1658 /* eeprom values are valid so use them */
1659 netif_dbg(dev, ifup, dev->net,
1660 "MAC address read from EEPROM");
55d7de9d
WH
1661 } else {
1662 /* generate random MAC */
1663 random_ether_addr(addr);
1664 netif_dbg(dev, ifup, dev->net,
1665 "MAC address set to random addr");
1666 }
a4977f3e
PE
1667
1668 addr_lo = addr[0] | (addr[1] << 8) |
1669 (addr[2] << 16) | (addr[3] << 24);
1670 addr_hi = addr[4] | (addr[5] << 8);
1671
1672 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1673 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
55d7de9d
WH
1674 }
1675
1676 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1677 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1678
1679 ether_addr_copy(dev->net->dev_addr, addr);
1680}
1681
ce85e13a
WH
/* MDIO read and write wrappers for phylib */

/* Read PHY register @idx of @phy_id through the MAC's MII interface.
 * Returns the 16-bit register value or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* phy_mutex serializes the MII_ACC/MII_DATA register sequence */
	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* success: hand back the 16-bit register contents */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1718
/* Write @regval to PHY register @idx of @phy_id via the MII interface. */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* serialize MII_ACC/MII_DATA access with other PHY users */
	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	/* NOTE(review): write/busy errors after the lock are not
	 * propagated - this function always returns 0
	 */
	return 0;
}
1753
1754static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1755{
ce85e13a 1756 int ret;
ce85e13a
WH
1757
1758 dev->mdiobus = mdiobus_alloc();
1759 if (!dev->mdiobus) {
1760 netdev_err(dev->net, "can't allocate MDIO bus\n");
1761 return -ENOMEM;
1762 }
1763
1764 dev->mdiobus->priv = (void *)dev;
1765 dev->mdiobus->read = lan78xx_mdiobus_read;
1766 dev->mdiobus->write = lan78xx_mdiobus_write;
1767 dev->mdiobus->name = "lan78xx-mdiobus";
1768
1769 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1770 dev->udev->bus->busnum, dev->udev->devnum);
1771
87177ba6
WH
1772 switch (dev->chipid) {
1773 case ID_REV_CHIP_ID_7800_:
1774 case ID_REV_CHIP_ID_7850_:
ce85e13a
WH
1775 /* set to internal PHY id */
1776 dev->mdiobus->phy_mask = ~(1 << 1);
1777 break;
02dc1f3d
WH
1778 case ID_REV_CHIP_ID_7801_:
1779 /* scan thru PHYAD[2..0] */
1780 dev->mdiobus->phy_mask = ~(0xFF);
1781 break;
ce85e13a
WH
1782 }
1783
1784 ret = mdiobus_register(dev->mdiobus);
1785 if (ret) {
1786 netdev_err(dev->net, "can't register MDIO bus\n");
e7f4dc35 1787 goto exit1;
ce85e13a
WH
1788 }
1789
1790 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1791 return 0;
ce85e13a
WH
1792exit1:
1793 mdiobus_free(dev->mdiobus);
1794 return ret;
1795}
1796
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1802
/* phylib link-change callback. */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1834
cc89c323
WH
/* irq_domain map callback: bind a virtual irq to the lan78xx irqchip. */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1846
/* irq_domain unmap callback: detach chip and handler from the irq. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1852
/* irq_domain operations for the device's internal interrupt sources. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1857
/* irqchip mask: clear the line's bit in the cached enable mask.
 * The hardware register is written later in irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1864
/* irqchip unmask: set the line's bit in the cached enable mask.
 * The hardware register is written later in irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1871
/* Start an irqchip update transaction (paired with bus_sync_unlock). */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1878
/* Commit the cached enable mask to INT_EP_CTL and end the transaction. */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1896
/* irqchip backed by the device's INT_EP_CTL enable register. */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1904
/* Create an irq_domain so the PHY interrupt can be used as a regular
 * interrupt line. Returns 0 on success or -EINVAL.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current register value */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1943
/* Undo lan78xx_setup_irq_domain(): dispose the mapping, remove domain. */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
1955
02dc1f3d
WH
/* PHY fixup for an external LAN8835 on LAN7801 (RGMII).
 * Registered from lan78xx_phy_init(); returns 1 so probing continues.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1979
/* PHY fixup for an external KSZ9031RNX on LAN7801 (RGMII).
 * Registered from lan78xx_phy_init(); returns 1 so probing continues.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
1996
55d7de9d
WH
1997static int lan78xx_phy_init(struct lan78xx_net *dev)
1998{
ce85e13a 1999 int ret;
349e0c5e 2000 u32 mii_adv;
ce85e13a 2001 struct phy_device *phydev = dev->net->phydev;
55d7de9d 2002
ce85e13a
WH
2003 phydev = phy_find_first(dev->mdiobus);
2004 if (!phydev) {
2005 netdev_err(dev->net, "no PHY found\n");
2006 return -EIO;
2007 }
55d7de9d 2008
02dc1f3d
WH
2009 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2010 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2011 phydev->is_internal = true;
2012 dev->interface = PHY_INTERFACE_MODE_GMII;
2013
2014 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2015 if (!phydev->drv) {
2016 netdev_err(dev->net, "no PHY driver found\n");
2017 return -EIO;
2018 }
2019
2020 dev->interface = PHY_INTERFACE_MODE_RGMII;
2021
2022 /* external PHY fixup for KSZ9031RNX */
2023 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2024 ksz9031rnx_fixup);
2025 if (ret < 0) {
2026 netdev_err(dev->net, "fail to register fixup\n");
2027 return ret;
2028 }
2029 /* external PHY fixup for LAN8835 */
2030 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2031 lan8835_fixup);
2032 if (ret < 0) {
2033 netdev_err(dev->net, "fail to register fixup\n");
2034 return ret;
2035 }
2036 /* add more external PHY fixup here if needed */
2037
2038 phydev->is_internal = false;
2039 } else {
2040 netdev_err(dev->net, "unknown ID found\n");
2041 ret = -EIO;
2042 goto error;
2043 }
2044
cc89c323
WH
2045 /* if phyirq is not set, use polling mode in phylib */
2046 if (dev->domain_data.phyirq > 0)
2047 phydev->irq = dev->domain_data.phyirq;
2048 else
2049 phydev->irq = 0;
2050 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
e4953910 2051
f6e3ef3e
WH
2052 /* set to AUTOMDIX */
2053 phydev->mdix = ETH_TP_MDI_AUTO;
2054
ce85e13a
WH
2055 ret = phy_connect_direct(dev->net, phydev,
2056 lan78xx_link_status_change,
02dc1f3d 2057 dev->interface);
ce85e13a
WH
2058 if (ret) {
2059 netdev_err(dev->net, "can't attach PHY to %s\n",
2060 dev->mdiobus->id);
2061 return -EIO;
2062 }
55d7de9d 2063
ce85e13a
WH
2064 /* MAC doesn't support 1000T Half */
2065 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 2066
349e0c5e
WH
2067 /* support both flow controls */
2068 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2069 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2070 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2071 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2072
ce85e13a
WH
2073 genphy_config_aneg(phydev);
2074
349e0c5e
WH
2075 dev->fc_autoneg = phydev->autoneg;
2076
55d7de9d 2077 return 0;
02dc1f3d
WH
2078
2079error:
2080 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2081 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2082
2083 return ret;
55d7de9d
WH
2084}
2085
2086static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2087{
2088 int ret = 0;
2089 u32 buf;
2090 bool rxenabled;
2091
2092 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2093
2094 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2095
2096 if (rxenabled) {
2097 buf &= ~MAC_RX_RXEN_;
2098 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2099 }
2100
2101 /* add 4 to size for FCS */
2102 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2103 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2104
2105 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2106
2107 if (rxenabled) {
2108 buf |= MAC_RX_RXEN_;
2109 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2110 }
2111
2112 return 0;
2113}
2114
/* Asynchronously unlink (cancel) every URB queued on @q.
 * Returns the number of URBs whose unlink was successfully started.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* pick the next entry not already being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2159
/* ndo_change_mtu: update the MAC max frame length and, if the rx URB
 * buffers must grow, recycle the in-flight rx URBs.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* bigger buffers needed: cancel queued rx URBs so
			 * they are resubmitted at the new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2189
e0c79ff6 2190static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2191{
2192 struct lan78xx_net *dev = netdev_priv(netdev);
2193 struct sockaddr *addr = p;
2194 u32 addr_lo, addr_hi;
2195 int ret;
2196
2197 if (netif_running(netdev))
2198 return -EBUSY;
2199
2200 if (!is_valid_ether_addr(addr->sa_data))
2201 return -EADDRNOTAVAIL;
2202
2203 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2204
2205 addr_lo = netdev->dev_addr[0] |
2206 netdev->dev_addr[1] << 8 |
2207 netdev->dev_addr[2] << 16 |
2208 netdev->dev_addr[3] << 24;
2209 addr_hi = netdev->dev_addr[4] |
2210 netdev->dev_addr[5] << 8;
2211
2212 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2213 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2214
9c5239ee
JM
2215 /* Added to support MAC address changes */
2216 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2217 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2218
55d7de9d
WH
2219 return 0;
2220}
2221
2222/* Enable or disable Rx checksum offload engine */
2223static int lan78xx_set_features(struct net_device *netdev,
2224 netdev_features_t features)
2225{
2226 struct lan78xx_net *dev = netdev_priv(netdev);
2227 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2228 unsigned long flags;
2229 int ret;
2230
2231 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2232
2233 if (features & NETIF_F_RXCSUM) {
2234 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2235 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2236 } else {
2237 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2238 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2239 }
2240
2241 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2242 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2243 else
2244 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2245
2246 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2247
2248 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2249
2250 return 0;
2251}
2252
2253static void lan78xx_deferred_vlan_write(struct work_struct *param)
2254{
2255 struct lan78xx_priv *pdata =
2256 container_of(param, struct lan78xx_priv, set_vlan);
2257 struct lan78xx_net *dev = pdata->dev;
2258
2259 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2260 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2261}
2262
2263static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2264 __be16 proto, u16 vid)
2265{
2266 struct lan78xx_net *dev = netdev_priv(netdev);
2267 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2268 u16 vid_bit_index;
2269 u16 vid_dword_index;
2270
2271 vid_dword_index = (vid >> 5) & 0x7F;
2272 vid_bit_index = vid & 0x1F;
2273
2274 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2275
2276 /* defer register writes to a sleepable context */
2277 schedule_work(&pdata->set_vlan);
2278
2279 return 0;
2280}
2281
2282static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2283 __be16 proto, u16 vid)
2284{
2285 struct lan78xx_net *dev = netdev_priv(netdev);
2286 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2287 u16 vid_bit_index;
2288 u16 vid_dword_index;
2289
2290 vid_dword_index = (vid >> 5) & 0x7F;
2291 vid_bit_index = vid & 0x1F;
2292
2293 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2294
2295 /* defer register writes to a sleepable context */
2296 schedule_work(&pdata->set_vlan);
2297
2298 return 0;
2299}
2300
/* Initialise the USB LTM (Latency Tolerance Messaging) BELT registers.
 *
 * Values default to zero.  When LTM is enabled in USB_CFG1, a pointer
 * record at EEPROM/OTP offset 0x3F is consulted: temp[0] must be 24
 * (the record length in bytes) and temp[1] is the word offset of the
 * six register values.  On a raw read failure nothing is written.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			/* fall back to OTP when no EEPROM is present */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2339
/* Full hardware initialisation: lite-reset the chip, program MAC
 * address, USB/FIFO/burst parameters and receive filters, reset the
 * PHY, then enable the TX and RX paths.
 *
 * Returns 0 on success or -EIO if a reset fails to complete within
 * one second.  Individual register read/write results are not checked
 * (ret is overwritten throughout).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger a LiteReset and poll for its self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size burst cap and URB queues by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the TX path: MAC transmitter then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC receiver then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2477
20ff5565
WH
2478static void lan78xx_init_stats(struct lan78xx_net *dev)
2479{
2480 u32 *p;
2481 int i;
2482
2483 /* initialize for stats update
2484 * some counters are 20bits and some are 32bits
2485 */
2486 p = (u32 *)&dev->stats.rollover_max;
2487 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2488 p[i] = 0xFFFFF;
2489
2490 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2491 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2492 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2493 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2494 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2495 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2496 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2497 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2498 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2499 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2500
2501 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2502}
2503
55d7de9d
WH
/* ndo_open handler: take a PM reference, start the PHY, submit the
 * interrupt (link status) URB, start statistics collection and open
 * the TX queue.  A link-reset event is deferred to pick up the
 * initial link state.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* drop the PM reference taken above; autosuspend may resume */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2542
2543static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2544{
2545 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2546 DECLARE_WAITQUEUE(wait, current);
2547 int temp;
2548
2549 /* ensure there are no more active urbs */
2550 add_wait_queue(&unlink_wakeup, &wait);
2551 set_current_state(TASK_UNINTERRUPTIBLE);
2552 dev->wait = &unlink_wakeup;
2553 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2554
2555 /* maybe wait for deletions to finish. */
2556 while (!skb_queue_empty(&dev->rxq) &&
2557 !skb_queue_empty(&dev->txq) &&
2558 !skb_queue_empty(&dev->done)) {
2559 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2560 set_current_state(TASK_UNINTERRUPTIBLE);
2561 netif_dbg(dev, ifdown, dev->net,
2562 "waited for %d urb completions\n", temp);
2563 }
2564 set_current_state(TASK_RUNNING);
2565 dev->wait = NULL;
2566 remove_wait_queue(&unlink_wakeup, &wait);
2567}
2568
/* ndo_stop handler: stop the stats timer and PHY, tear down in-flight
 * URBs, and quiesce deferred work before the interface goes down.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* cancel outstanding bulk URBs and wait for completion */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* drop packets parked while RX was paused */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2605
/* Flatten a possibly-fragmented skb into one linear buffer; the TX
 * batching path copies packet data with memcpy and cannot walk
 * fragments.  Returns 0 on success (see skb_linearize()).
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2610
/* Prepend the two device TX command words (TX_CMD_A, TX_CMD_B) to @skb.
 *
 * Ensures 8 bytes of writable headroom and a linear buffer, then
 * encodes frame length, FCS insertion, checksum/LSO offload and any
 * VLAN tag into the command words (stored little-endian).
 * Returns the prepared skb, or NULL on failure.  On headroom-expansion
 * failure the skb is freed here; NOTE(review): on linearize failure
 * the skb appears to be neither freed nor returned — possible leak,
 * confirm against the caller's drop accounting.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (lan78xx_linearize(skb) < 0)
		return NULL;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the minimum the hardware accepts */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push TX_CMD_B first so TX_CMD_A ends up at the front */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2653
/* Move @skb from @list to dev->done, recording its new state.
 *
 * Returns the previous state so callers can detect an unlink already
 * in progress.  The first skb placed on dev->done schedules the
 * bottom-half tasklet.  Note the lock hand-over: interrupts stay
 * disabled (saved in @flags) while @list->lock is exchanged for
 * dev->done.lock without re-enabling in between.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2676
/* Bulk-out URB completion handler (interrupt context).
 *
 * Updates TX statistics on success, schedules recovery or stops the
 * queue on errors, drops the async PM reference taken at submit time,
 * and hands the skb to the bottom half via txq -> done.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear halt from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* USB link trouble: stop the queue */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2715
2716static void lan78xx_queue_skb(struct sk_buff_head *list,
2717 struct sk_buff *newsk, enum skb_state state)
2718{
2719 struct skb_data *entry = (struct skb_data *)newsk->cb;
2720
2721 __skb_queue_tail(list, newsk);
2722 entry->state = state;
2723}
2724
e0c79ff6
BX
2725static netdev_tx_t
2726lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
55d7de9d
WH
2727{
2728 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2729 struct sk_buff *skb2 = NULL;
55d7de9d 2730
81c38e81 2731 if (skb) {
55d7de9d 2732 skb_tx_timestamp(skb);
81c38e81
WH
2733 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2734 }
55d7de9d 2735
81c38e81
WH
2736 if (skb2) {
2737 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2738
4b2a4a96
WH
2739 /* throttle TX patch at slower than SUPER SPEED USB */
2740 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2741 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2742 netif_stop_queue(net);
2743 } else {
2744 netif_dbg(dev, tx_err, dev->net,
2745 "lan78xx_tx_prep return NULL\n");
2746 dev->net->stats.tx_errors++;
2747 dev->net->stats.tx_dropped++;
2748 }
2749
2750 tasklet_schedule(&dev->bh);
2751
2752 return NETDEV_TX_OK;
2753}
2754
e0c79ff6
BX
/* Locate the bulk-in, bulk-out and (optional) interrupt-in endpoints
 * among the interface's altsettings.
 *
 * On success stores the bulk pipes in dev->pipe_in/pipe_out and the
 * status endpoint in dev->ep_intr, and returns 0.  Returns -EINVAL if
 * no altsetting provides both bulk endpoints.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints are of use */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* ignore iso/control endpoints */
				continue;
			}
			/* remember the first endpoint of each role */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2813
2814static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2815{
2816 struct lan78xx_priv *pdata = NULL;
2817 int ret;
2818 int i;
2819
2820 ret = lan78xx_get_endpoints(dev, intf);
2821
2822 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2823
2824 pdata = (struct lan78xx_priv *)(dev->data[0]);
2825 if (!pdata) {
2826 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2827 return -ENOMEM;
2828 }
2829
2830 pdata->dev = dev;
2831
2832 spin_lock_init(&pdata->rfe_ctl_lock);
2833 mutex_init(&pdata->dataport_mutex);
2834
2835 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2836
2837 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2838 pdata->vlan_table[i] = 0;
2839
2840 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2841
2842 dev->net->features = 0;
2843
2844 if (DEFAULT_TX_CSUM_ENABLE)
2845 dev->net->features |= NETIF_F_HW_CSUM;
2846
2847 if (DEFAULT_RX_CSUM_ENABLE)
2848 dev->net->features |= NETIF_F_RXCSUM;
2849
2850 if (DEFAULT_TSO_CSUM_ENABLE)
2851 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2852
2853 dev->net->hw_features = dev->net->features;
2854
cc89c323
WH
2855 ret = lan78xx_setup_irq_domain(dev);
2856 if (ret < 0) {
2857 netdev_warn(dev->net,
2858 "lan78xx_setup_irq_domain() failed : %d", ret);
629eeaac 2859 goto out1;
cc89c323
WH
2860 }
2861
0573f94b
NS
2862 dev->net->hard_header_len += TX_OVERHEAD;
2863 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2864
55d7de9d
WH
2865 /* Init all registers */
2866 ret = lan78xx_reset(dev);
629eeaac
RC
2867 if (ret) {
2868 netdev_warn(dev->net, "Registers INIT FAILED....");
2869 goto out2;
2870 }
55d7de9d 2871
fb52c3b5 2872 ret = lan78xx_mdio_init(dev);
629eeaac
RC
2873 if (ret) {
2874 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2875 goto out2;
2876 }
ce85e13a 2877
55d7de9d
WH
2878 dev->net->flags |= IFF_MULTICAST;
2879
2880 pdata->wol = WAKE_MAGIC;
2881
fb52c3b5 2882 return ret;
629eeaac
RC
2883
2884out2:
2885 lan78xx_remove_irq_domain(dev);
2886
2887out1:
2888 netdev_warn(dev->net, "Bind routine FAILED");
2889 cancel_work_sync(&pdata->set_multicast);
2890 cancel_work_sync(&pdata->set_vlan);
2891 kfree(pdata);
2892 return ret;
55d7de9d
WH
2893}
2894
/* Inverse of lan78xx_bind(): tear down the IRQ domain and MDIO bus,
 * cancel pending deferred register writes, and free the private data.
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		/* the work items dereference pdata; stop them first */
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}
2912
2913static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2914 struct sk_buff *skb,
2915 u32 rx_cmd_a, u32 rx_cmd_b)
2916{
2917 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2918 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2919 skb->ip_summed = CHECKSUM_NONE;
2920 } else {
2921 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2922 skb->ip_summed = CHECKSUM_COMPLETE;
2923 }
2924}
2925
/* Hand a received packet up to the network stack.
 *
 * If RX is paused (EVENT_RX_PAUSED) the skb is parked on rxq_pause
 * instead.  Updates RX counters, resolves the protocol, and delivers
 * via netif_rx() unless a hardware RX timestamp defers the skb.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* skb ownership passes to the timestamping core if deferred */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2952
/* Parse one bulk-in buffer that may hold several received frames.
 *
 * Each frame is preceded by the little-endian RX_CMD_A/B/C words.
 * All frames except the last are cloned out and delivered here; the
 * last frame reuses @skb itself (this returns 1 and the caller
 * delivers a non-empty skb).  Returns 0 on a malformed buffer or
 * clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* too short to hold even one command header */
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* frames are padded out to 4-byte alignment */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's bytes */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3024
3025static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3026{
3027 if (!lan78xx_rx(dev, skb)) {
3028 dev->net->stats.rx_errors++;
3029 goto done;
3030 }
3031
3032 if (skb->len) {
3033 lan78xx_skb_return(dev, skb);
3034 return;
3035 }
3036
3037 netif_dbg(dev, rx_err, dev->net, "drop\n");
3038 dev->net->stats.rx_errors++;
3039done:
3040 skb_queue_tail(&dev->done, skb);
3041}
3042
3043static void rx_complete(struct urb *urb);
3044
/* Allocate an RX skb and submit @urb for a bulk-in transfer.
 *
 * On success the skb is queued on dev->rxq in state rx_start.  On any
 * failure the skb and urb are freed and a negative errno is returned;
 * -ENOLINK tells the caller to stop refilling (device stopped/gone).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is up, running and awake */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3104
/* Bulk-in URB completion handler (interrupt context).
 *
 * Classifies the completion status, passes the skb to the bottom half
 * via defer_bh(), and — when the URB is still usable — resubmits it
 * immediately.  On fatal statuses the URB is kept with the skb (via
 * entry->urb) or freed here.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: too short to hold a command header */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET: /* async unlink */
	case -ESHUTDOWN: /* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb with the skb; don't resubmit below */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3174
/* TX bottom half: coalesce pending skbs from txq_pend into a single
 * bulk-out URB and submit it.
 *
 * GSO skbs are sent alone; non-GSO frames are packed back-to-back at
 * 4-byte boundaries up to MAX_SINGLE_PACKET_SIZE.  If the device is
 * autosuspended the URB is anchored on dev->deferred and transmitted
 * on resume.  entry->length/num_of_packet carry payload byte and
 * packet counts for tx_complete()'s statistics.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* first pass: decide how many leading frames fit in one batch */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* a GSO skb at the head goes out on its own */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue the counted frames and pack them */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* stalled endpoint: stop and recover from process context */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3302
3303static void lan78xx_rx_bh(struct lan78xx_net *dev)
3304{
3305 struct urb *urb;
3306 int i;
3307
3308 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3309 for (i = 0; i < 10; i++) {
3310 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3311 break;
3312 urb = usb_alloc_urb(0, GFP_ATOMIC);
3313 if (urb)
3314 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3315 return;
3316 }
3317
3318 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3319 tasklet_schedule(&dev->bh);
3320 }
3321 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3322 netif_wake_queue(dev->net);
3323}
3324
3325static void lan78xx_bh(unsigned long param)
3326{
3327 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3328 struct sk_buff *skb;
3329 struct skb_data *entry;
3330
55d7de9d
WH
3331 while ((skb = skb_dequeue(&dev->done))) {
3332 entry = (struct skb_data *)(skb->cb);
3333 switch (entry->state) {
3334 case rx_done:
3335 entry->state = rx_cleanup;
3336 rx_process(dev, skb);
3337 continue;
3338 case tx_done:
3339 usb_free_urb(entry->urb);
3340 dev_kfree_skb(skb);
3341 continue;
3342 case rx_cleanup:
3343 usb_free_urb(entry->urb);
3344 dev_kfree_skb(skb);
3345 continue;
3346 default:
3347 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3348 return;
3349 }
55d7de9d
WH
3350 }
3351
3352 if (netif_device_present(dev->net) && netif_running(dev->net)) {
20ff5565
WH
3353 /* reset update timer delta */
3354 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3355 dev->delta = 1;
3356 mod_timer(&dev->stat_monitor,
3357 jiffies + STAT_UPDATE_TIMER);
3358 }
3359
55d7de9d
WH
3360 if (!skb_queue_empty(&dev->txq_pend))
3361 lan78xx_tx_bh(dev);
3362
3363 if (!timer_pending(&dev->delay) &&
3364 !test_bit(EVENT_RX_HALT, &dev->flags))
3365 lan78xx_rx_bh(dev);
3366 }
3367}
3368
3369static void lan78xx_delayedwork(struct work_struct *work)
3370{
3371 int status;
3372 struct lan78xx_net *dev;
3373
3374 dev = container_of(work, struct lan78xx_net, wq.work);
3375
3376 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3377 unlink_urbs(dev, &dev->txq);
3378 status = usb_autopm_get_interface(dev->intf);
3379 if (status < 0)
3380 goto fail_pipe;
3381 status = usb_clear_halt(dev->udev, dev->pipe_out);
3382 usb_autopm_put_interface(dev->intf);
3383 if (status < 0 &&
3384 status != -EPIPE &&
3385 status != -ESHUTDOWN) {
3386 if (netif_msg_tx_err(dev))
3387fail_pipe:
3388 netdev_err(dev->net,
3389 "can't clear tx halt, status %d\n",
3390 status);
3391 } else {
3392 clear_bit(EVENT_TX_HALT, &dev->flags);
3393 if (status != -ESHUTDOWN)
3394 netif_wake_queue(dev->net);
3395 }
3396 }
3397 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3398 unlink_urbs(dev, &dev->rxq);
3399 status = usb_autopm_get_interface(dev->intf);
3400 if (status < 0)
3401 goto fail_halt;
3402 status = usb_clear_halt(dev->udev, dev->pipe_in);
3403 usb_autopm_put_interface(dev->intf);
3404 if (status < 0 &&
3405 status != -EPIPE &&
3406 status != -ESHUTDOWN) {
3407 if (netif_msg_rx_err(dev))
3408fail_halt:
3409 netdev_err(dev->net,
3410 "can't clear rx halt, status %d\n",
3411 status);
3412 } else {
3413 clear_bit(EVENT_RX_HALT, &dev->flags);
3414 tasklet_schedule(&dev->bh);
3415 }
3416 }
3417
3418 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3419 int ret = 0;
3420
3421 clear_bit(EVENT_LINK_RESET, &dev->flags);
3422 status = usb_autopm_get_interface(dev->intf);
3423 if (status < 0)
3424 goto skip_reset;
3425 if (lan78xx_link_reset(dev) < 0) {
3426 usb_autopm_put_interface(dev->intf);
3427skip_reset:
3428 netdev_info(dev->net, "link reset failed (%d)\n",
3429 ret);
3430 } else {
3431 usb_autopm_put_interface(dev->intf);
3432 }
3433 }
20ff5565
WH
3434
3435 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3436 lan78xx_update_stats(dev);
3437
3438 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3439
3440 mod_timer(&dev->stat_monitor,
3441 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3442
3443 dev->delta = min((dev->delta * 2), 50);
3444 }
55d7de9d
WH
3445}
3446
3447static void intr_complete(struct urb *urb)
3448{
3449 struct lan78xx_net *dev = urb->context;
3450 int status = urb->status;
3451
3452 switch (status) {
3453 /* success */
3454 case 0:
3455 lan78xx_status(dev, urb);
3456 break;
3457
3458 /* software-driven interface shutdown */
3459 case -ENOENT: /* urb killed */
3460 case -ESHUTDOWN: /* hardware gone */
3461 netif_dbg(dev, ifdown, dev->net,
3462 "intr shutdown, code %d\n", status);
3463 return;
3464
3465 /* NOTE: not throttling like RX/TX, since this endpoint
3466 * already polls infrequently
3467 */
3468 default:
3469 netdev_dbg(dev->net, "intr status %d\n", status);
3470 break;
3471 }
3472
3473 if (!netif_running(dev->net))
3474 return;
3475
3476 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3477 status = usb_submit_urb(urb, GFP_ATOMIC);
3478 if (status != 0)
3479 netif_err(dev, timer, dev->net,
3480 "intr resubmit --> %d\n", status);
3481}
3482
3483static void lan78xx_disconnect(struct usb_interface *intf)
3484{
3485 struct lan78xx_net *dev;
3486 struct usb_device *udev;
3487 struct net_device *net;
3488
3489 dev = usb_get_intfdata(intf);
3490 usb_set_intfdata(intf, NULL);
3491 if (!dev)
3492 return;
3493
3494 udev = interface_to_usbdev(intf);
55d7de9d 3495 net = dev->net;
6d03ff16
AG
3496
3497 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3498 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3499
3500 phy_disconnect(net->phydev);
3501
55d7de9d
WH
3502 unregister_netdev(net);
3503
3504 cancel_delayed_work_sync(&dev->wq);
3505
3506 usb_scuttle_anchored_urbs(&dev->deferred);
3507
3508 lan78xx_unbind(dev, intf);
3509
3510 usb_kill_urb(dev->urb_intr);
3511 usb_free_urb(dev->urb_intr);
3512
3513 free_netdev(net);
3514 usb_put_dev(udev);
3515}
3516
e0c79ff6 3517static void lan78xx_tx_timeout(struct net_device *net)
55d7de9d
WH
3518{
3519 struct lan78xx_net *dev = netdev_priv(net);
3520
3521 unlink_urbs(dev, &dev->txq);
3522 tasklet_schedule(&dev->bh);
3523}
3524
/* net_device callbacks implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
3539
20ff5565
WH
3540static void lan78xx_stat_monitor(unsigned long param)
3541{
3542 struct lan78xx_net *dev;
3543
3544 dev = (struct lan78xx_net *)param;
3545
3546 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3547}
3548
55d7de9d
WH
/* USB probe: allocate the netdev, initialize queues/timers/work items,
 * bind the hardware, set up the bulk and interrupt pipes, and register
 * the network device.  Errors unwind through the out1..out4 labels in
 * reverse order of acquisition.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->stat_monitor.function = lan78xx_stat_monitor;
	dev->stat_monitor.data = (unsigned long)dev;
	dev->delta = 1;
	init_timer(&dev->stat_monitor);

	mutex_init(&dev->stats.access_lock);

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;
	strcpy(netdev->name, "eth%d");

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	/* NOTE(review): the altsetting's endpoint array is indexed 0..2
	 * without validating bNumEndpoints — a malicious/odd device
	 * descriptor could make these reads go out of bounds; confirm
	 * against upstream endpoint-validation fixes.
	 */
	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					dev->ep_intr->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	period = dev->ep_intr->desc.bInterval;

	/* interrupt URB buffer; a failed kmalloc is tolerated (the device
	 * then simply runs without the interrupt endpoint)
	 */
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	/* NOTE(review): on the out3/out4 paths below, dev->urb_intr and
	 * its buffer are not released — looks like a leak on probe
	 * failure; compare with later upstream cleanups.
	 */
	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out3;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	return 0;

out4:
	unregister_netdev(netdev);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3678
3679static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3680{
3681 const u16 crc16poly = 0x8005;
3682 int i;
3683 u16 bit, crc, msb;
3684 u8 data;
3685
3686 crc = 0xFFFF;
3687 for (i = 0; i < len; i++) {
3688 data = *buf++;
3689 for (bit = 0; bit < 8; bit++) {
3690 msb = crc >> 15;
3691 crc <<= 1;
3692
3693 if (msb ^ (u16)(data & 1)) {
3694 crc ^= crc16poly;
3695 crc |= (u16)0x0001U;
3696 }
3697 data >>= 1;
3698 }
3699 }
3700
3701 return crc;
3702}
3703
/* Program wake-on-LAN filters and the suspend mode according to the
 * requested WoL bitmap (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST /
 * WAKE_MCAST / WAKE_UCAST / WAKE_ARP), then re-enable the receiver so
 * wake frames can be detected.  Always returns 0.
 *
 * NOTE(review): the individual register-access return codes are
 * assigned to 'ret' but never checked — presumably intentional
 * best-effort on the suspend path; confirm before adding checks.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while the wake-up logic is reconfigured */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wake-up configuration and status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wake-up frame filter before selectively enabling */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		/* magic packet uses suspend mode 3 (deeper than mode 0) */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames can be matched */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3846
e0c79ff6 3847static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
55d7de9d
WH
3848{
3849 struct lan78xx_net *dev = usb_get_intfdata(intf);
3850 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3851 u32 buf;
3852 int ret;
3853 int event;
3854
55d7de9d
WH
3855 event = message.event;
3856
3857 if (!dev->suspend_count++) {
3858 spin_lock_irq(&dev->txq.lock);
3859 /* don't autosuspend while transmitting */
3860 if ((skb_queue_len(&dev->txq) ||
3861 skb_queue_len(&dev->txq_pend)) &&
3862 PMSG_IS_AUTO(message)) {
3863 spin_unlock_irq(&dev->txq.lock);
3864 ret = -EBUSY;
3865 goto out;
3866 } else {
3867 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3868 spin_unlock_irq(&dev->txq.lock);
3869 }
3870
3871 /* stop TX & RX */
3872 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3873 buf &= ~MAC_TX_TXEN_;
3874 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3875 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3876 buf &= ~MAC_RX_RXEN_;
3877 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3878
3879 /* empty out the rx and queues */
3880 netif_device_detach(dev->net);
3881 lan78xx_terminate_urbs(dev);
3882 usb_kill_urb(dev->urb_intr);
3883
3884 /* reattach */
3885 netif_device_attach(dev->net);
3886 }
3887
3888 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
20ff5565
WH
3889 del_timer(&dev->stat_monitor);
3890
55d7de9d
WH
3891 if (PMSG_IS_AUTO(message)) {
3892 /* auto suspend (selective suspend) */
3893 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3894 buf &= ~MAC_TX_TXEN_;
3895 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3896 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3897 buf &= ~MAC_RX_RXEN_;
3898 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3899
3900 ret = lan78xx_write_reg(dev, WUCSR, 0);
3901 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3902 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3903
3904 /* set goodframe wakeup */
3905 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3906
3907 buf |= WUCSR_RFE_WAKE_EN_;
3908 buf |= WUCSR_STORE_WAKE_;
3909
3910 ret = lan78xx_write_reg(dev, WUCSR, buf);
3911
3912 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3913
3914 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3915 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3916
3917 buf |= PMT_CTL_PHY_WAKE_EN_;
3918 buf |= PMT_CTL_WOL_EN_;
3919 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3920 buf |= PMT_CTL_SUS_MODE_3_;
3921
3922 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3923
3924 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3925
3926 buf |= PMT_CTL_WUPS_MASK_;
3927
3928 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3929
3930 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3931 buf |= MAC_RX_RXEN_;
3932 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3933 } else {
3934 lan78xx_set_suspend(dev, pdata->wol);
3935 }
3936 }
3937
49d28b56 3938 ret = 0;
55d7de9d
WH
3939out:
3940 return ret;
3941}
3942
e0c79ff6 3943static int lan78xx_resume(struct usb_interface *intf)
55d7de9d
WH
3944{
3945 struct lan78xx_net *dev = usb_get_intfdata(intf);
3946 struct sk_buff *skb;
3947 struct urb *res;
3948 int ret;
3949 u32 buf;
3950
20ff5565
WH
3951 if (!timer_pending(&dev->stat_monitor)) {
3952 dev->delta = 1;
3953 mod_timer(&dev->stat_monitor,
3954 jiffies + STAT_UPDATE_TIMER);
3955 }
3956
55d7de9d
WH
3957 if (!--dev->suspend_count) {
3958 /* resume interrupt URBs */
3959 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3960 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3961
3962 spin_lock_irq(&dev->txq.lock);
3963 while ((res = usb_get_from_anchor(&dev->deferred))) {
3964 skb = (struct sk_buff *)res->context;
3965 ret = usb_submit_urb(res, GFP_ATOMIC);
3966 if (ret < 0) {
3967 dev_kfree_skb_any(skb);
3968 usb_free_urb(res);
3969 usb_autopm_put_interface_async(dev->intf);
3970 } else {
860e9538 3971 netif_trans_update(dev->net);
55d7de9d
WH
3972 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3973 }
3974 }
3975
3976 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3977 spin_unlock_irq(&dev->txq.lock);
3978
3979 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3980 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3981 netif_start_queue(dev->net);
3982 tasklet_schedule(&dev->bh);
3983 }
3984 }
3985
3986 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3987 ret = lan78xx_write_reg(dev, WUCSR, 0);
3988 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3989
3990 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3991 WUCSR2_ARP_RCD_ |
3992 WUCSR2_IPV6_TCPSYN_RCD_ |
3993 WUCSR2_IPV4_TCPSYN_RCD_);
3994
3995 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3996 WUCSR_EEE_RX_WAKE_ |
3997 WUCSR_PFDA_FR_ |
3998 WUCSR_RFE_WAKE_FR_ |
3999 WUCSR_WUFR_ |
4000 WUCSR_MPR_ |
4001 WUCSR_BCST_FR_);
4002
4003 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4004 buf |= MAC_TX_TXEN_;
4005 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4006
4007 return 0;
4008}
4009
e0c79ff6 4010static int lan78xx_reset_resume(struct usb_interface *intf)
55d7de9d
WH
4011{
4012 struct lan78xx_net *dev = usb_get_intfdata(intf);
4013
4014 lan78xx_reset(dev);
ce85e13a 4015
6d03ff16 4016 phy_start(dev->net->phydev);
ce85e13a 4017
55d7de9d
WH
4018 return lan78xx_resume(intf);
4019}
4020
/* USB vendor/product IDs of the supported Microchip LAN78xx parts;
 * exported via MODULE_DEVICE_TABLE for module autoloading.
 */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4037
/* USB driver glue: probe/disconnect and power-management entry points */
static struct usb_driver lan78xx_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = lan78xx_probe,
	.disconnect = lan78xx_disconnect,
	.suspend = lan78xx_suspend,
	.resume = lan78xx_resume,
	.reset_resume = lan78xx_reset_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");