Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
cc89c323
WH
33#include <linux/interrupt.h>
34#include <linux/irqdomain.h>
35#include <linux/irq.h>
36#include <linux/irqchip/chained_irq.h>
bdfba55e 37#include <linux/microchipphy.h>
55d7de9d
WH
38#include "lan78xx.h"
39
40#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
41#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
42#define DRIVER_NAME "lan78xx"
02dc1f3d 43#define DRIVER_VERSION "1.0.6"
55d7de9d
WH
44
45#define TX_TIMEOUT_JIFFIES (5 * HZ)
46#define THROTTLE_JIFFIES (HZ / 8)
47#define UNLINK_TIMEOUT_MS 3
48
49#define RX_MAX_QUEUE_MEMORY (60 * 1518)
50
51#define SS_USB_PKT_SIZE (1024)
52#define HS_USB_PKT_SIZE (512)
53#define FS_USB_PKT_SIZE (64)
54
55#define MAX_RX_FIFO_SIZE (12 * 1024)
56#define MAX_TX_FIFO_SIZE (12 * 1024)
57#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
58#define DEFAULT_BULK_IN_DELAY (0x0800)
59#define MAX_SINGLE_PACKET_SIZE (9000)
60#define DEFAULT_TX_CSUM_ENABLE (true)
61#define DEFAULT_RX_CSUM_ENABLE (true)
62#define DEFAULT_TSO_CSUM_ENABLE (true)
63#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
64#define TX_OVERHEAD (8)
65#define RXW_PADDING 2
66
67#define LAN78XX_USB_VENDOR_ID (0x0424)
68#define LAN7800_USB_PRODUCT_ID (0x7800)
69#define LAN7850_USB_PRODUCT_ID (0x7850)
02dc1f3d 70#define LAN7801_USB_PRODUCT_ID (0x7801)
55d7de9d
WH
71#define LAN78XX_EEPROM_MAGIC (0x78A5)
72#define LAN78XX_OTP_MAGIC (0x78F3)
73
74#define MII_READ 1
75#define MII_WRITE 0
76
77#define EEPROM_INDICATOR (0xA5)
78#define EEPROM_MAC_OFFSET (0x01)
79#define MAX_EEPROM_SIZE 512
80#define OTP_INDICATOR_1 (0xF3)
81#define OTP_INDICATOR_2 (0xF7)
82
83#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
84 WAKE_MCAST | WAKE_BCAST | \
85 WAKE_ARP | WAKE_MAGIC)
86
87/* USB related defines */
88#define BULK_IN_PIPE 1
89#define BULK_OUT_PIPE 2
90
91/* default autosuspend delay (mSec)*/
92#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
93
20ff5565
WH
94/* statistic update interval (mSec) */
95#define STAT_UPDATE_TIMER (1 * 1000)
96
cc89c323
WH
97/* defines interrupts from interrupt EP */
98#define MAX_INT_EP (32)
99#define INT_EP_INTEP (31)
100#define INT_EP_OTP_WR_DONE (28)
101#define INT_EP_EEE_TX_LPI_START (26)
102#define INT_EP_EEE_TX_LPI_STOP (25)
103#define INT_EP_EEE_RX_LPI (24)
104#define INT_EP_MAC_RESET_TIMEOUT (23)
105#define INT_EP_RDFO (22)
106#define INT_EP_TXE (21)
107#define INT_EP_USB_STATUS (20)
108#define INT_EP_TX_DIS (19)
109#define INT_EP_RX_DIS (18)
110#define INT_EP_PHY (17)
111#define INT_EP_DP (16)
112#define INT_EP_MAC_ERR (15)
113#define INT_EP_TDFU (14)
114#define INT_EP_TDFO (13)
115#define INT_EP_UTX (12)
116#define INT_EP_GPIO_11 (11)
117#define INT_EP_GPIO_10 (10)
118#define INT_EP_GPIO_9 (9)
119#define INT_EP_GPIO_8 (8)
120#define INT_EP_GPIO_7 (7)
121#define INT_EP_GPIO_6 (6)
122#define INT_EP_GPIO_5 (5)
123#define INT_EP_GPIO_4 (4)
124#define INT_EP_GPIO_3 (3)
125#define INT_EP_GPIO_2 (2)
126#define INT_EP_GPIO_1 (1)
127#define INT_EP_GPIO_0 (0)
128
55d7de9d
WH
129static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
130 "RX FCS Errors",
131 "RX Alignment Errors",
132 "Rx Fragment Errors",
133 "RX Jabber Errors",
134 "RX Undersize Frame Errors",
135 "RX Oversize Frame Errors",
136 "RX Dropped Frames",
137 "RX Unicast Byte Count",
138 "RX Broadcast Byte Count",
139 "RX Multicast Byte Count",
140 "RX Unicast Frames",
141 "RX Broadcast Frames",
142 "RX Multicast Frames",
143 "RX Pause Frames",
144 "RX 64 Byte Frames",
145 "RX 65 - 127 Byte Frames",
146 "RX 128 - 255 Byte Frames",
147 "RX 256 - 511 Bytes Frames",
148 "RX 512 - 1023 Byte Frames",
149 "RX 1024 - 1518 Byte Frames",
150 "RX Greater 1518 Byte Frames",
151 "EEE RX LPI Transitions",
152 "EEE RX LPI Time",
153 "TX FCS Errors",
154 "TX Excess Deferral Errors",
155 "TX Carrier Errors",
156 "TX Bad Byte Count",
157 "TX Single Collisions",
158 "TX Multiple Collisions",
159 "TX Excessive Collision",
160 "TX Late Collisions",
161 "TX Unicast Byte Count",
162 "TX Broadcast Byte Count",
163 "TX Multicast Byte Count",
164 "TX Unicast Frames",
165 "TX Broadcast Frames",
166 "TX Multicast Frames",
167 "TX Pause Frames",
168 "TX 64 Byte Frames",
169 "TX 65 - 127 Byte Frames",
170 "TX 128 - 255 Byte Frames",
171 "TX 256 - 511 Bytes Frames",
172 "TX 512 - 1023 Byte Frames",
173 "TX 1024 - 1518 Byte Frames",
174 "TX Greater 1518 Byte Frames",
175 "EEE TX LPI Transitions",
176 "EEE TX LPI Time",
177};
178
179struct lan78xx_statstage {
180 u32 rx_fcs_errors;
181 u32 rx_alignment_errors;
182 u32 rx_fragment_errors;
183 u32 rx_jabber_errors;
184 u32 rx_undersize_frame_errors;
185 u32 rx_oversize_frame_errors;
186 u32 rx_dropped_frames;
187 u32 rx_unicast_byte_count;
188 u32 rx_broadcast_byte_count;
189 u32 rx_multicast_byte_count;
190 u32 rx_unicast_frames;
191 u32 rx_broadcast_frames;
192 u32 rx_multicast_frames;
193 u32 rx_pause_frames;
194 u32 rx_64_byte_frames;
195 u32 rx_65_127_byte_frames;
196 u32 rx_128_255_byte_frames;
197 u32 rx_256_511_bytes_frames;
198 u32 rx_512_1023_byte_frames;
199 u32 rx_1024_1518_byte_frames;
200 u32 rx_greater_1518_byte_frames;
201 u32 eee_rx_lpi_transitions;
202 u32 eee_rx_lpi_time;
203 u32 tx_fcs_errors;
204 u32 tx_excess_deferral_errors;
205 u32 tx_carrier_errors;
206 u32 tx_bad_byte_count;
207 u32 tx_single_collisions;
208 u32 tx_multiple_collisions;
209 u32 tx_excessive_collision;
210 u32 tx_late_collisions;
211 u32 tx_unicast_byte_count;
212 u32 tx_broadcast_byte_count;
213 u32 tx_multicast_byte_count;
214 u32 tx_unicast_frames;
215 u32 tx_broadcast_frames;
216 u32 tx_multicast_frames;
217 u32 tx_pause_frames;
218 u32 tx_64_byte_frames;
219 u32 tx_65_127_byte_frames;
220 u32 tx_128_255_byte_frames;
221 u32 tx_256_511_bytes_frames;
222 u32 tx_512_1023_byte_frames;
223 u32 tx_1024_1518_byte_frames;
224 u32 tx_greater_1518_byte_frames;
225 u32 eee_tx_lpi_transitions;
226 u32 eee_tx_lpi_time;
227};
228
20ff5565
WH
229struct lan78xx_statstage64 {
230 u64 rx_fcs_errors;
231 u64 rx_alignment_errors;
232 u64 rx_fragment_errors;
233 u64 rx_jabber_errors;
234 u64 rx_undersize_frame_errors;
235 u64 rx_oversize_frame_errors;
236 u64 rx_dropped_frames;
237 u64 rx_unicast_byte_count;
238 u64 rx_broadcast_byte_count;
239 u64 rx_multicast_byte_count;
240 u64 rx_unicast_frames;
241 u64 rx_broadcast_frames;
242 u64 rx_multicast_frames;
243 u64 rx_pause_frames;
244 u64 rx_64_byte_frames;
245 u64 rx_65_127_byte_frames;
246 u64 rx_128_255_byte_frames;
247 u64 rx_256_511_bytes_frames;
248 u64 rx_512_1023_byte_frames;
249 u64 rx_1024_1518_byte_frames;
250 u64 rx_greater_1518_byte_frames;
251 u64 eee_rx_lpi_transitions;
252 u64 eee_rx_lpi_time;
253 u64 tx_fcs_errors;
254 u64 tx_excess_deferral_errors;
255 u64 tx_carrier_errors;
256 u64 tx_bad_byte_count;
257 u64 tx_single_collisions;
258 u64 tx_multiple_collisions;
259 u64 tx_excessive_collision;
260 u64 tx_late_collisions;
261 u64 tx_unicast_byte_count;
262 u64 tx_broadcast_byte_count;
263 u64 tx_multicast_byte_count;
264 u64 tx_unicast_frames;
265 u64 tx_broadcast_frames;
266 u64 tx_multicast_frames;
267 u64 tx_pause_frames;
268 u64 tx_64_byte_frames;
269 u64 tx_65_127_byte_frames;
270 u64 tx_128_255_byte_frames;
271 u64 tx_256_511_bytes_frames;
272 u64 tx_512_1023_byte_frames;
273 u64 tx_1024_1518_byte_frames;
274 u64 tx_greater_1518_byte_frames;
275 u64 eee_tx_lpi_transitions;
276 u64 eee_tx_lpi_time;
277};
278
55d7de9d
WH
279struct lan78xx_net;
280
281struct lan78xx_priv {
282 struct lan78xx_net *dev;
283 u32 rfe_ctl;
284 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
285 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
286 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
287 struct mutex dataport_mutex; /* for dataport access */
288 spinlock_t rfe_ctl_lock; /* for rfe register access */
289 struct work_struct set_multicast;
290 struct work_struct set_vlan;
291 u32 wol;
292};
293
294enum skb_state {
295 illegal = 0,
296 tx_start,
297 tx_done,
298 rx_start,
299 rx_done,
300 rx_cleanup,
301 unlink_start
302};
303
304struct skb_data { /* skb->cb is one of these */
305 struct urb *urb;
306 struct lan78xx_net *dev;
307 enum skb_state state;
308 size_t length;
74d79a2e 309 int num_of_packet;
55d7de9d
WH
310};
311
312struct usb_context {
313 struct usb_ctrlrequest req;
314 struct lan78xx_net *dev;
315};
316
317#define EVENT_TX_HALT 0
318#define EVENT_RX_HALT 1
319#define EVENT_RX_MEMORY 2
320#define EVENT_STS_SPLIT 3
321#define EVENT_LINK_RESET 4
322#define EVENT_RX_PAUSED 5
323#define EVENT_DEV_WAKING 6
324#define EVENT_DEV_ASLEEP 7
325#define EVENT_DEV_OPEN 8
20ff5565
WH
326#define EVENT_STAT_UPDATE 9
327
328struct statstage {
329 struct mutex access_lock; /* for stats access */
330 struct lan78xx_statstage saved;
331 struct lan78xx_statstage rollover_count;
332 struct lan78xx_statstage rollover_max;
333 struct lan78xx_statstage64 curr_stat;
334};
55d7de9d 335
cc89c323
WH
336struct irq_domain_data {
337 struct irq_domain *irqdomain;
338 unsigned int phyirq;
339 struct irq_chip *irqchip;
340 irq_flow_handler_t irq_handler;
341 u32 irqenable;
342 struct mutex irq_lock; /* for irq bus access */
343};
344
55d7de9d
WH
345struct lan78xx_net {
346 struct net_device *net;
347 struct usb_device *udev;
348 struct usb_interface *intf;
349 void *driver_priv;
350
351 int rx_qlen;
352 int tx_qlen;
353 struct sk_buff_head rxq;
354 struct sk_buff_head txq;
355 struct sk_buff_head done;
356 struct sk_buff_head rxq_pause;
357 struct sk_buff_head txq_pend;
358
359 struct tasklet_struct bh;
360 struct delayed_work wq;
361
362 struct usb_host_endpoint *ep_blkin;
363 struct usb_host_endpoint *ep_blkout;
364 struct usb_host_endpoint *ep_intr;
365
366 int msg_enable;
367
368 struct urb *urb_intr;
369 struct usb_anchor deferred;
370
371 struct mutex phy_mutex; /* for phy access */
372 unsigned pipe_in, pipe_out, pipe_intr;
373
374 u32 hard_mtu; /* count any extra framing */
375 size_t rx_urb_size; /* size for rx urbs */
376
377 unsigned long flags;
378
379 wait_queue_head_t *wait;
380 unsigned char suspend_count;
381
382 unsigned maxpacket;
383 struct timer_list delay;
20ff5565 384 struct timer_list stat_monitor;
55d7de9d
WH
385
386 unsigned long data[5];
55d7de9d
WH
387
388 int link_on;
389 u8 mdix_ctrl;
ce85e13a 390
87177ba6
WH
391 u32 chipid;
392 u32 chiprev;
ce85e13a 393 struct mii_bus *mdiobus;
02dc1f3d 394 phy_interface_t interface;
349e0c5e
WH
395
396 int fc_autoneg;
397 u8 fc_request_control;
20ff5565
WH
398
399 int delta;
400 struct statstage stats;
cc89c323
WH
401
402 struct irq_domain_data domain_data;
55d7de9d
WH
403};
404
02dc1f3d
WH
405/* define external phy id */
406#define PHY_LAN8835 (0x0007C130)
407#define PHY_KSZ9031RNX (0x00221620)
408
55d7de9d
WH
409/* use ethtool to change the level for any given device */
410static int msg_level = -1;
411module_param(msg_level, int, 0);
412MODULE_PARM_DESC(msg_level, "Override default message level");
413
414static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
415{
416 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
417 int ret;
418
55d7de9d
WH
419 if (!buf)
420 return -ENOMEM;
421
422 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
423 USB_VENDOR_REQUEST_READ_REGISTER,
424 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
425 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
426 if (likely(ret >= 0)) {
427 le32_to_cpus(buf);
428 *data = *buf;
429 } else {
430 netdev_warn(dev->net,
431 "Failed to read register index 0x%08x. ret = %d",
432 index, ret);
433 }
434
435 kfree(buf);
436
437 return ret;
438}
439
440static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
441{
442 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
443 int ret;
444
55d7de9d
WH
445 if (!buf)
446 return -ENOMEM;
447
448 *buf = data;
449 cpu_to_le32s(buf);
450
451 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
452 USB_VENDOR_REQUEST_WRITE_REGISTER,
453 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
454 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
455 if (unlikely(ret < 0)) {
456 netdev_warn(dev->net,
457 "Failed to write register index 0x%08x. ret = %d",
458 index, ret);
459 }
460
461 kfree(buf);
462
463 return ret;
464}
465
466static int lan78xx_read_stats(struct lan78xx_net *dev,
467 struct lan78xx_statstage *data)
468{
469 int ret = 0;
470 int i;
471 struct lan78xx_statstage *stats;
472 u32 *src;
473 u32 *dst;
474
55d7de9d
WH
475 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
476 if (!stats)
477 return -ENOMEM;
478
479 ret = usb_control_msg(dev->udev,
480 usb_rcvctrlpipe(dev->udev, 0),
481 USB_VENDOR_REQUEST_GET_STATS,
482 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
483 0,
484 0,
485 (void *)stats,
486 sizeof(*stats),
487 USB_CTRL_SET_TIMEOUT);
488 if (likely(ret >= 0)) {
489 src = (u32 *)stats;
490 dst = (u32 *)data;
491 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
492 le32_to_cpus(&src[i]);
493 dst[i] = src[i];
494 }
495 } else {
496 netdev_warn(dev->net,
497 "Failed to read stat ret = 0x%x", ret);
498 }
499
500 kfree(stats);
501
502 return ret;
503}
504
20ff5565
WH
505#define check_counter_rollover(struct1, dev_stats, member) { \
506 if (struct1->member < dev_stats.saved.member) \
507 dev_stats.rollover_count.member++; \
508 }
509
510static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
511 struct lan78xx_statstage *stats)
512{
513 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
514 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
515 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
516 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
517 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
518 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
519 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
520 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
521 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
522 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
523 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
524 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
525 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
526 check_counter_rollover(stats, dev->stats, rx_pause_frames);
527 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
528 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
529 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
530 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
531 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
532 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
533 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
534 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
535 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
536 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
537 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
538 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
539 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
540 check_counter_rollover(stats, dev->stats, tx_single_collisions);
541 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
542 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
543 check_counter_rollover(stats, dev->stats, tx_late_collisions);
544 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
545 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
546 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
547 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
548 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
549 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
550 check_counter_rollover(stats, dev->stats, tx_pause_frames);
551 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
552 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
553 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
554 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
555 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
556 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
557 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
558 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
559 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
560
561 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
562}
563
564static void lan78xx_update_stats(struct lan78xx_net *dev)
565{
566 u32 *p, *count, *max;
567 u64 *data;
568 int i;
569 struct lan78xx_statstage lan78xx_stats;
570
571 if (usb_autopm_get_interface(dev->intf) < 0)
572 return;
573
574 p = (u32 *)&lan78xx_stats;
575 count = (u32 *)&dev->stats.rollover_count;
576 max = (u32 *)&dev->stats.rollover_max;
577 data = (u64 *)&dev->stats.curr_stat;
578
579 mutex_lock(&dev->stats.access_lock);
580
581 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
582 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
583
584 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
585 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
586
587 mutex_unlock(&dev->stats.access_lock);
588
589 usb_autopm_put_interface(dev->intf);
590}
591
55d7de9d
WH
592/* Loop until the read is completed with timeout called with phy_mutex held */
593static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
594{
595 unsigned long start_time = jiffies;
596 u32 val;
597 int ret;
598
599 do {
600 ret = lan78xx_read_reg(dev, MII_ACC, &val);
601 if (unlikely(ret < 0))
602 return -EIO;
603
604 if (!(val & MII_ACC_MII_BUSY_))
605 return 0;
606 } while (!time_after(jiffies, start_time + HZ));
607
608 return -EIO;
609}
610
611static inline u32 mii_access(int id, int index, int read)
612{
613 u32 ret;
614
615 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
616 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
617 if (read)
618 ret |= MII_ACC_MII_READ_;
619 else
620 ret |= MII_ACC_MII_WRITE_;
621 ret |= MII_ACC_MII_BUSY_;
622
623 return ret;
624}
625
55d7de9d
WH
626static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
627{
628 unsigned long start_time = jiffies;
629 u32 val;
630 int ret;
631
632 do {
633 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
634 if (unlikely(ret < 0))
635 return -EIO;
636
637 if (!(val & E2P_CMD_EPC_BUSY_) ||
638 (val & E2P_CMD_EPC_TIMEOUT_))
639 break;
640 usleep_range(40, 100);
641 } while (!time_after(jiffies, start_time + HZ));
642
643 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
644 netdev_warn(dev->net, "EEPROM read operation timeout");
645 return -EIO;
646 }
647
648 return 0;
649}
650
651static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
652{
653 unsigned long start_time = jiffies;
654 u32 val;
655 int ret;
656
657 do {
658 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
659 if (unlikely(ret < 0))
660 return -EIO;
661
662 if (!(val & E2P_CMD_EPC_BUSY_))
663 return 0;
664
665 usleep_range(40, 100);
666 } while (!time_after(jiffies, start_time + HZ));
667
668 netdev_warn(dev->net, "EEPROM is busy");
669 return -EIO;
670}
671
672static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
673 u32 length, u8 *data)
674{
675 u32 val;
a0db7d10 676 u32 saved;
55d7de9d 677 int i, ret;
a0db7d10
WH
678 int retval;
679
680 /* depends on chip, some EEPROM pins are muxed with LED function.
681 * disable & restore LED function to access EEPROM.
682 */
683 ret = lan78xx_read_reg(dev, HW_CFG, &val);
684 saved = val;
87177ba6 685 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
686 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
687 ret = lan78xx_write_reg(dev, HW_CFG, val);
688 }
55d7de9d 689
a0db7d10
WH
690 retval = lan78xx_eeprom_confirm_not_busy(dev);
691 if (retval)
692 return retval;
55d7de9d
WH
693
694 for (i = 0; i < length; i++) {
695 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
696 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
697 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
698 if (unlikely(ret < 0)) {
699 retval = -EIO;
700 goto exit;
701 }
55d7de9d 702
a0db7d10
WH
703 retval = lan78xx_wait_eeprom(dev);
704 if (retval < 0)
705 goto exit;
55d7de9d
WH
706
707 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
708 if (unlikely(ret < 0)) {
709 retval = -EIO;
710 goto exit;
711 }
55d7de9d
WH
712
713 data[i] = val & 0xFF;
714 offset++;
715 }
716
a0db7d10
WH
717 retval = 0;
718exit:
87177ba6 719 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
720 ret = lan78xx_write_reg(dev, HW_CFG, saved);
721
722 return retval;
55d7de9d
WH
723}
724
725static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
726 u32 length, u8 *data)
727{
728 u8 sig;
729 int ret;
730
731 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
732 if ((ret == 0) && (sig == EEPROM_INDICATOR))
733 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
734 else
735 ret = -EINVAL;
736
737 return ret;
738}
739
740static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
741 u32 length, u8 *data)
742{
743 u32 val;
a0db7d10 744 u32 saved;
55d7de9d 745 int i, ret;
a0db7d10
WH
746 int retval;
747
748 /* depends on chip, some EEPROM pins are muxed with LED function.
749 * disable & restore LED function to access EEPROM.
750 */
751 ret = lan78xx_read_reg(dev, HW_CFG, &val);
752 saved = val;
87177ba6 753 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
754 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
755 ret = lan78xx_write_reg(dev, HW_CFG, val);
756 }
55d7de9d 757
a0db7d10
WH
758 retval = lan78xx_eeprom_confirm_not_busy(dev);
759 if (retval)
760 goto exit;
55d7de9d
WH
761
762 /* Issue write/erase enable command */
763 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
764 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
765 if (unlikely(ret < 0)) {
766 retval = -EIO;
767 goto exit;
768 }
55d7de9d 769
a0db7d10
WH
770 retval = lan78xx_wait_eeprom(dev);
771 if (retval < 0)
772 goto exit;
55d7de9d
WH
773
774 for (i = 0; i < length; i++) {
775 /* Fill data register */
776 val = data[i];
777 ret = lan78xx_write_reg(dev, E2P_DATA, val);
a0db7d10
WH
778 if (ret < 0) {
779 retval = -EIO;
780 goto exit;
781 }
55d7de9d
WH
782
783 /* Send "write" command */
784 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
785 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
786 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
787 if (ret < 0) {
788 retval = -EIO;
789 goto exit;
790 }
55d7de9d 791
a0db7d10
WH
792 retval = lan78xx_wait_eeprom(dev);
793 if (retval < 0)
794 goto exit;
55d7de9d
WH
795
796 offset++;
797 }
798
a0db7d10
WH
799 retval = 0;
800exit:
87177ba6 801 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
802 ret = lan78xx_write_reg(dev, HW_CFG, saved);
803
804 return retval;
55d7de9d
WH
805}
806
807static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
808 u32 length, u8 *data)
809{
810 int i;
811 int ret;
812 u32 buf;
813 unsigned long timeout;
814
815 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
816
817 if (buf & OTP_PWR_DN_PWRDN_N_) {
818 /* clear it and wait to be cleared */
819 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
820
821 timeout = jiffies + HZ;
822 do {
823 usleep_range(1, 10);
824 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
825 if (time_after(jiffies, timeout)) {
826 netdev_warn(dev->net,
827 "timeout on OTP_PWR_DN");
828 return -EIO;
829 }
830 } while (buf & OTP_PWR_DN_PWRDN_N_);
831 }
832
833 for (i = 0; i < length; i++) {
834 ret = lan78xx_write_reg(dev, OTP_ADDR1,
835 ((offset + i) >> 8) & OTP_ADDR1_15_11);
836 ret = lan78xx_write_reg(dev, OTP_ADDR2,
837 ((offset + i) & OTP_ADDR2_10_3));
838
839 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
840 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
841
842 timeout = jiffies + HZ;
843 do {
844 udelay(1);
845 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
846 if (time_after(jiffies, timeout)) {
847 netdev_warn(dev->net,
848 "timeout on OTP_STATUS");
849 return -EIO;
850 }
851 } while (buf & OTP_STATUS_BUSY_);
852
853 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
854
855 data[i] = (u8)(buf & 0xFF);
856 }
857
858 return 0;
859}
860
9fb6066d
WH
861static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
862 u32 length, u8 *data)
863{
864 int i;
865 int ret;
866 u32 buf;
867 unsigned long timeout;
868
869 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
870
871 if (buf & OTP_PWR_DN_PWRDN_N_) {
872 /* clear it and wait to be cleared */
873 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
874
875 timeout = jiffies + HZ;
876 do {
877 udelay(1);
878 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
879 if (time_after(jiffies, timeout)) {
880 netdev_warn(dev->net,
881 "timeout on OTP_PWR_DN completion");
882 return -EIO;
883 }
884 } while (buf & OTP_PWR_DN_PWRDN_N_);
885 }
886
887 /* set to BYTE program mode */
888 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
889
890 for (i = 0; i < length; i++) {
891 ret = lan78xx_write_reg(dev, OTP_ADDR1,
892 ((offset + i) >> 8) & OTP_ADDR1_15_11);
893 ret = lan78xx_write_reg(dev, OTP_ADDR2,
894 ((offset + i) & OTP_ADDR2_10_3));
895 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
896 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
897 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
898
899 timeout = jiffies + HZ;
900 do {
901 udelay(1);
902 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
903 if (time_after(jiffies, timeout)) {
904 netdev_warn(dev->net,
905 "Timeout on OTP_STATUS completion");
906 return -EIO;
907 }
908 } while (buf & OTP_STATUS_BUSY_);
909 }
910
911 return 0;
912}
913
55d7de9d
WH
914static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
915 u32 length, u8 *data)
916{
917 u8 sig;
918 int ret;
919
920 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
921
922 if (ret == 0) {
923 if (sig == OTP_INDICATOR_1)
924 offset = offset;
925 else if (sig == OTP_INDICATOR_2)
926 offset += 0x100;
927 else
928 ret = -EINVAL;
929 ret = lan78xx_read_raw_otp(dev, offset, length, data);
930 }
931
932 return ret;
933}
934
935static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
936{
937 int i, ret;
938
939 for (i = 0; i < 100; i++) {
940 u32 dp_sel;
941
942 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
943 if (unlikely(ret < 0))
944 return -EIO;
945
946 if (dp_sel & DP_SEL_DPRDY_)
947 return 0;
948
949 usleep_range(40, 100);
950 }
951
952 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
953
954 return -EIO;
955}
956
957static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
958 u32 addr, u32 length, u32 *buf)
959{
960 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
961 u32 dp_sel;
962 int i, ret;
963
964 if (usb_autopm_get_interface(dev->intf) < 0)
965 return 0;
966
967 mutex_lock(&pdata->dataport_mutex);
968
969 ret = lan78xx_dataport_wait_not_busy(dev);
970 if (ret < 0)
971 goto done;
972
973 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
974
975 dp_sel &= ~DP_SEL_RSEL_MASK_;
976 dp_sel |= ram_select;
977 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
978
979 for (i = 0; i < length; i++) {
980 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
981
982 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
983
984 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
985
986 ret = lan78xx_dataport_wait_not_busy(dev);
987 if (ret < 0)
988 goto done;
989 }
990
991done:
992 mutex_unlock(&pdata->dataport_mutex);
993 usb_autopm_put_interface(dev->intf);
994
995 return ret;
996}
997
998static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
999 int index, u8 addr[ETH_ALEN])
1000{
1001 u32 temp;
1002
1003 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1004 temp = addr[3];
1005 temp = addr[2] | (temp << 8);
1006 temp = addr[1] | (temp << 8);
1007 temp = addr[0] | (temp << 8);
1008 pdata->pfilter_table[index][1] = temp;
1009 temp = addr[5];
1010 temp = addr[4] | (temp << 8);
1011 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1012 pdata->pfilter_table[index][0] = temp;
1013 }
1014}
1015
1016/* returns hash bit number for given MAC address */
1017static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1018{
1019 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1020}
1021
1022static void lan78xx_deferred_multicast_write(struct work_struct *param)
1023{
1024 struct lan78xx_priv *pdata =
1025 container_of(param, struct lan78xx_priv, set_multicast);
1026 struct lan78xx_net *dev = pdata->dev;
1027 int i;
1028 int ret;
1029
1030 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1031 pdata->rfe_ctl);
1032
1033 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1034 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1035
1036 for (i = 1; i < NUM_OF_MAF; i++) {
1037 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1038 ret = lan78xx_write_reg(dev, MAF_LO(i),
1039 pdata->pfilter_table[i][1]);
1040 ret = lan78xx_write_reg(dev, MAF_HI(i),
1041 pdata->pfilter_table[i][0]);
1042 }
1043
1044 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1045}
1046
1047static void lan78xx_set_multicast(struct net_device *netdev)
1048{
1049 struct lan78xx_net *dev = netdev_priv(netdev);
1050 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1051 unsigned long flags;
1052 int i;
1053
1054 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1055
1056 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1057 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1058
1059 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1060 pdata->mchash_table[i] = 0;
1061 /* pfilter_table[0] has own HW address */
1062 for (i = 1; i < NUM_OF_MAF; i++) {
1063 pdata->pfilter_table[i][0] =
1064 pdata->pfilter_table[i][1] = 0;
1065 }
1066
1067 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1068
1069 if (dev->net->flags & IFF_PROMISC) {
1070 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1071 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1072 } else {
1073 if (dev->net->flags & IFF_ALLMULTI) {
1074 netif_dbg(dev, drv, dev->net,
1075 "receive all multicast enabled");
1076 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1077 }
1078 }
1079
1080 if (netdev_mc_count(dev->net)) {
1081 struct netdev_hw_addr *ha;
1082 int i;
1083
1084 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1085
1086 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1087
1088 i = 1;
1089 netdev_for_each_mc_addr(ha, netdev) {
1090 /* set first 32 into Perfect Filter */
1091 if (i < 33) {
1092 lan78xx_set_addr_filter(pdata, i, ha->addr);
1093 } else {
1094 u32 bitnum = lan78xx_hash(ha->addr);
1095
1096 pdata->mchash_table[bitnum / 32] |=
1097 (1 << (bitnum % 32));
1098 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1099 }
1100 i++;
1101 }
1102 }
1103
1104 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1105
1106 /* defer register writes to a sleepable context */
1107 schedule_work(&pdata->set_multicast);
1108}
1109
1110static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1111 u16 lcladv, u16 rmtadv)
1112{
1113 u32 flow = 0, fct_flow = 0;
1114 int ret;
349e0c5e 1115 u8 cap;
55d7de9d 1116
349e0c5e
WH
1117 if (dev->fc_autoneg)
1118 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1119 else
1120 cap = dev->fc_request_control;
55d7de9d
WH
1121
1122 if (cap & FLOW_CTRL_TX)
349e0c5e 1123 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
55d7de9d
WH
1124
1125 if (cap & FLOW_CTRL_RX)
1126 flow |= FLOW_CR_RX_FCEN_;
1127
1128 if (dev->udev->speed == USB_SPEED_SUPER)
1129 fct_flow = 0x817;
1130 else if (dev->udev->speed == USB_SPEED_HIGH)
1131 fct_flow = 0x211;
1132
1133 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1134 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1135 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1136
1137 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1138
1139 /* threshold value should be set before enabling flow */
1140 ret = lan78xx_write_reg(dev, FLOW, flow);
1141
1142 return 0;
1143}
1144
1145static int lan78xx_link_reset(struct lan78xx_net *dev)
1146{
ce85e13a 1147 struct phy_device *phydev = dev->net->phydev;
6e76510e 1148 struct ethtool_link_ksettings ecmd;
99c79ece 1149 int ladv, radv, ret;
55d7de9d
WH
1150 u32 buf;
1151
55d7de9d
WH
1152 /* clear LAN78xx interrupt status */
1153 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1154 if (unlikely(ret < 0))
1155 return -EIO;
1156
ce85e13a
WH
1157 phy_read_status(phydev);
1158
1159 if (!phydev->link && dev->link_on) {
55d7de9d 1160 dev->link_on = false;
55d7de9d
WH
1161
1162 /* reset MAC */
1163 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1164 if (unlikely(ret < 0))
1165 return -EIO;
1166 buf |= MAC_CR_RST_;
1167 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1168 if (unlikely(ret < 0))
1169 return -EIO;
e4953910 1170
20ff5565 1171 del_timer(&dev->stat_monitor);
ce85e13a 1172 } else if (phydev->link && !dev->link_on) {
55d7de9d
WH
1173 dev->link_on = true;
1174
6e76510e 1175 phy_ethtool_ksettings_get(phydev, &ecmd);
55d7de9d 1176
55d7de9d 1177 if (dev->udev->speed == USB_SPEED_SUPER) {
6e76510e 1178 if (ecmd.base.speed == 1000) {
55d7de9d
WH
1179 /* disable U2 */
1180 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1181 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1182 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1183 /* enable U1 */
1184 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1185 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1186 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1187 } else {
1188 /* enable U1 & U2 */
1189 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1190 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1191 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1192 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1193 }
1194 }
1195
ce85e13a 1196 ladv = phy_read(phydev, MII_ADVERTISE);
99c79ece
GU
1197 if (ladv < 0)
1198 return ladv;
55d7de9d 1199
ce85e13a 1200 radv = phy_read(phydev, MII_LPA);
99c79ece
GU
1201 if (radv < 0)
1202 return radv;
55d7de9d
WH
1203
1204 netif_dbg(dev, link, dev->net,
1205 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
6e76510e 1206 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
55d7de9d 1207
6e76510e
PR
1208 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1209 radv);
20ff5565
WH
1210
1211 if (!timer_pending(&dev->stat_monitor)) {
1212 dev->delta = 1;
1213 mod_timer(&dev->stat_monitor,
1214 jiffies + STAT_UPDATE_TIMER);
1215 }
55d7de9d
WH
1216 }
1217
1218 return ret;
1219}
1220
1221/* some work can't be done in tasklets, so we use keventd
1222 *
1223 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1224 * but tasklet_schedule() doesn't. hope the failure is rare.
1225 */
e0c79ff6 1226static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
55d7de9d
WH
1227{
1228 set_bit(work, &dev->flags);
1229 if (!schedule_delayed_work(&dev->wq, 0))
1230 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1231}
1232
1233static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1234{
1235 u32 intdata;
1236
1237 if (urb->actual_length != 4) {
1238 netdev_warn(dev->net,
1239 "unexpected urb length %d", urb->actual_length);
1240 return;
1241 }
1242
1243 memcpy(&intdata, urb->transfer_buffer, 4);
1244 le32_to_cpus(&intdata);
1245
1246 if (intdata & INT_ENP_PHY_INT) {
1247 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1248 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1249
1250 if (dev->domain_data.phyirq > 0)
1251 generic_handle_irq(dev->domain_data.phyirq);
55d7de9d
WH
1252 } else
1253 netdev_warn(dev->net,
1254 "unexpected interrupt: 0x%08x\n", intdata);
1255}
1256
1257static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1258{
1259 return MAX_EEPROM_SIZE;
1260}
1261
1262static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1263 struct ethtool_eeprom *ee, u8 *data)
1264{
1265 struct lan78xx_net *dev = netdev_priv(netdev);
1266
1267 ee->magic = LAN78XX_EEPROM_MAGIC;
1268
1269 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1270}
1271
1272static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1273 struct ethtool_eeprom *ee, u8 *data)
1274{
1275 struct lan78xx_net *dev = netdev_priv(netdev);
1276
1277 /* Allow entire eeprom update only */
1278 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1279 (ee->offset == 0) &&
1280 (ee->len == 512) &&
1281 (data[0] == EEPROM_INDICATOR))
1282 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1283 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1284 (ee->offset == 0) &&
1285 (ee->len == 512) &&
1286 (data[0] == OTP_INDICATOR_1))
9fb6066d 1287 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d
WH
1288
1289 return -EINVAL;
1290}
1291
1292static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1293 u8 *data)
1294{
1295 if (stringset == ETH_SS_STATS)
1296 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1297}
1298
1299static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1300{
1301 if (sset == ETH_SS_STATS)
1302 return ARRAY_SIZE(lan78xx_gstrings);
1303 else
1304 return -EOPNOTSUPP;
1305}
1306
1307static void lan78xx_get_stats(struct net_device *netdev,
1308 struct ethtool_stats *stats, u64 *data)
1309{
1310 struct lan78xx_net *dev = netdev_priv(netdev);
55d7de9d 1311
20ff5565 1312 lan78xx_update_stats(dev);
55d7de9d 1313
20ff5565
WH
1314 mutex_lock(&dev->stats.access_lock);
1315 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1316 mutex_unlock(&dev->stats.access_lock);
55d7de9d
WH
1317}
1318
1319static void lan78xx_get_wol(struct net_device *netdev,
1320 struct ethtool_wolinfo *wol)
1321{
1322 struct lan78xx_net *dev = netdev_priv(netdev);
1323 int ret;
1324 u32 buf;
1325 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1326
1327 if (usb_autopm_get_interface(dev->intf) < 0)
1328 return;
1329
1330 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1331 if (unlikely(ret < 0)) {
1332 wol->supported = 0;
1333 wol->wolopts = 0;
1334 } else {
1335 if (buf & USB_CFG_RMT_WKP_) {
1336 wol->supported = WAKE_ALL;
1337 wol->wolopts = pdata->wol;
1338 } else {
1339 wol->supported = 0;
1340 wol->wolopts = 0;
1341 }
1342 }
1343
1344 usb_autopm_put_interface(dev->intf);
1345}
1346
1347static int lan78xx_set_wol(struct net_device *netdev,
1348 struct ethtool_wolinfo *wol)
1349{
1350 struct lan78xx_net *dev = netdev_priv(netdev);
1351 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1352 int ret;
1353
1354 ret = usb_autopm_get_interface(dev->intf);
1355 if (ret < 0)
1356 return ret;
1357
1358 pdata->wol = 0;
1359 if (wol->wolopts & WAKE_UCAST)
1360 pdata->wol |= WAKE_UCAST;
1361 if (wol->wolopts & WAKE_MCAST)
1362 pdata->wol |= WAKE_MCAST;
1363 if (wol->wolopts & WAKE_BCAST)
1364 pdata->wol |= WAKE_BCAST;
1365 if (wol->wolopts & WAKE_MAGIC)
1366 pdata->wol |= WAKE_MAGIC;
1367 if (wol->wolopts & WAKE_PHY)
1368 pdata->wol |= WAKE_PHY;
1369 if (wol->wolopts & WAKE_ARP)
1370 pdata->wol |= WAKE_ARP;
1371
1372 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1373
ce85e13a
WH
1374 phy_ethtool_set_wol(netdev->phydev, wol);
1375
55d7de9d
WH
1376 usb_autopm_put_interface(dev->intf);
1377
1378 return ret;
1379}
1380
1381static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1382{
1383 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1384 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1385 int ret;
1386 u32 buf;
55d7de9d
WH
1387
1388 ret = usb_autopm_get_interface(dev->intf);
1389 if (ret < 0)
1390 return ret;
1391
ce85e13a
WH
1392 ret = phy_ethtool_get_eee(phydev, edata);
1393 if (ret < 0)
1394 goto exit;
1395
55d7de9d
WH
1396 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1397 if (buf & MAC_CR_EEE_EN_) {
55d7de9d 1398 edata->eee_enabled = true;
ce85e13a
WH
1399 edata->eee_active = !!(edata->advertised &
1400 edata->lp_advertised);
55d7de9d
WH
1401 edata->tx_lpi_enabled = true;
1402 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1403 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1404 edata->tx_lpi_timer = buf;
1405 } else {
55d7de9d
WH
1406 edata->eee_enabled = false;
1407 edata->eee_active = false;
55d7de9d
WH
1408 edata->tx_lpi_enabled = false;
1409 edata->tx_lpi_timer = 0;
1410 }
1411
ce85e13a
WH
1412 ret = 0;
1413exit:
55d7de9d
WH
1414 usb_autopm_put_interface(dev->intf);
1415
ce85e13a 1416 return ret;
55d7de9d
WH
1417}
1418
1419static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1420{
1421 struct lan78xx_net *dev = netdev_priv(net);
1422 int ret;
1423 u32 buf;
1424
1425 ret = usb_autopm_get_interface(dev->intf);
1426 if (ret < 0)
1427 return ret;
1428
1429 if (edata->eee_enabled) {
1430 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1431 buf |= MAC_CR_EEE_EN_;
1432 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1433
ce85e13a
WH
1434 phy_ethtool_set_eee(net->phydev, edata);
1435
1436 buf = (u32)edata->tx_lpi_timer;
1437 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1438 } else {
1439 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1440 buf &= ~MAC_CR_EEE_EN_;
1441 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1442 }
1443
1444 usb_autopm_put_interface(dev->intf);
1445
1446 return 0;
1447}
1448
1449static u32 lan78xx_get_link(struct net_device *net)
1450{
ce85e13a 1451 phy_read_status(net->phydev);
55d7de9d 1452
ce85e13a 1453 return net->phydev->link;
55d7de9d
WH
1454}
1455
55d7de9d
WH
1456static void lan78xx_get_drvinfo(struct net_device *net,
1457 struct ethtool_drvinfo *info)
1458{
1459 struct lan78xx_net *dev = netdev_priv(net);
1460
1461 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1462 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1463 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1464}
1465
1466static u32 lan78xx_get_msglevel(struct net_device *net)
1467{
1468 struct lan78xx_net *dev = netdev_priv(net);
1469
1470 return dev->msg_enable;
1471}
1472
1473static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1474{
1475 struct lan78xx_net *dev = netdev_priv(net);
1476
1477 dev->msg_enable = level;
1478}
1479
6e76510e
PR
1480static int lan78xx_get_link_ksettings(struct net_device *net,
1481 struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1482{
1483 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1484 struct phy_device *phydev = net->phydev;
55d7de9d 1485 int ret;
55d7de9d 1486
55d7de9d
WH
1487 ret = usb_autopm_get_interface(dev->intf);
1488 if (ret < 0)
1489 return ret;
1490
6e76510e 1491 ret = phy_ethtool_ksettings_get(phydev, cmd);
55d7de9d 1492
55d7de9d
WH
1493 usb_autopm_put_interface(dev->intf);
1494
1495 return ret;
1496}
1497
6e76510e
PR
1498static int lan78xx_set_link_ksettings(struct net_device *net,
1499 const struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1500{
1501 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1502 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1503 int ret = 0;
1504 int temp;
1505
55d7de9d
WH
1506 ret = usb_autopm_get_interface(dev->intf);
1507 if (ret < 0)
1508 return ret;
1509
55d7de9d 1510 /* change speed & duplex */
6e76510e 1511 ret = phy_ethtool_ksettings_set(phydev, cmd);
55d7de9d 1512
6e76510e 1513 if (!cmd->base.autoneg) {
55d7de9d 1514 /* force link down */
ce85e13a
WH
1515 temp = phy_read(phydev, MII_BMCR);
1516 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 1517 mdelay(1);
ce85e13a 1518 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
1519 }
1520
1521 usb_autopm_put_interface(dev->intf);
1522
1523 return ret;
1524}
1525
349e0c5e
WH
1526static void lan78xx_get_pause(struct net_device *net,
1527 struct ethtool_pauseparam *pause)
1528{
1529 struct lan78xx_net *dev = netdev_priv(net);
1530 struct phy_device *phydev = net->phydev;
6e76510e 1531 struct ethtool_link_ksettings ecmd;
349e0c5e 1532
6e76510e 1533 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
1534
1535 pause->autoneg = dev->fc_autoneg;
1536
1537 if (dev->fc_request_control & FLOW_CTRL_TX)
1538 pause->tx_pause = 1;
1539
1540 if (dev->fc_request_control & FLOW_CTRL_RX)
1541 pause->rx_pause = 1;
1542}
1543
1544static int lan78xx_set_pause(struct net_device *net,
1545 struct ethtool_pauseparam *pause)
1546{
1547 struct lan78xx_net *dev = netdev_priv(net);
1548 struct phy_device *phydev = net->phydev;
6e76510e 1549 struct ethtool_link_ksettings ecmd;
349e0c5e
WH
1550 int ret;
1551
6e76510e 1552 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e 1553
6e76510e 1554 if (pause->autoneg && !ecmd.base.autoneg) {
349e0c5e
WH
1555 ret = -EINVAL;
1556 goto exit;
1557 }
1558
1559 dev->fc_request_control = 0;
1560 if (pause->rx_pause)
1561 dev->fc_request_control |= FLOW_CTRL_RX;
1562
1563 if (pause->tx_pause)
1564 dev->fc_request_control |= FLOW_CTRL_TX;
1565
6e76510e 1566 if (ecmd.base.autoneg) {
349e0c5e 1567 u32 mii_adv;
6e76510e
PR
1568 u32 advertising;
1569
1570 ethtool_convert_link_mode_to_legacy_u32(
1571 &advertising, ecmd.link_modes.advertising);
349e0c5e 1572
6e76510e 1573 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
349e0c5e 1574 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
6e76510e
PR
1575 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1576
1577 ethtool_convert_legacy_u32_to_link_mode(
1578 ecmd.link_modes.advertising, advertising);
1579
1580 phy_ethtool_ksettings_set(phydev, &ecmd);
349e0c5e
WH
1581 }
1582
1583 dev->fc_autoneg = pause->autoneg;
1584
1585 ret = 0;
1586exit:
1587 return ret;
1588}
1589
55d7de9d
WH
1590static const struct ethtool_ops lan78xx_ethtool_ops = {
1591 .get_link = lan78xx_get_link,
860ce4b4 1592 .nway_reset = phy_ethtool_nway_reset,
55d7de9d
WH
1593 .get_drvinfo = lan78xx_get_drvinfo,
1594 .get_msglevel = lan78xx_get_msglevel,
1595 .set_msglevel = lan78xx_set_msglevel,
55d7de9d
WH
1596 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1597 .get_eeprom = lan78xx_ethtool_get_eeprom,
1598 .set_eeprom = lan78xx_ethtool_set_eeprom,
1599 .get_ethtool_stats = lan78xx_get_stats,
1600 .get_sset_count = lan78xx_get_sset_count,
1601 .get_strings = lan78xx_get_strings,
1602 .get_wol = lan78xx_get_wol,
1603 .set_wol = lan78xx_set_wol,
1604 .get_eee = lan78xx_get_eee,
1605 .set_eee = lan78xx_set_eee,
349e0c5e
WH
1606 .get_pauseparam = lan78xx_get_pause,
1607 .set_pauseparam = lan78xx_set_pause,
6e76510e
PR
1608 .get_link_ksettings = lan78xx_get_link_ksettings,
1609 .set_link_ksettings = lan78xx_set_link_ksettings,
55d7de9d
WH
1610};
1611
1612static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1613{
55d7de9d
WH
1614 if (!netif_running(netdev))
1615 return -EINVAL;
1616
ce85e13a 1617 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1618}
1619
1620static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1621{
1622 u32 addr_lo, addr_hi;
1623 int ret;
1624 u8 addr[6];
1625
1626 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1627 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1628
1629 addr[0] = addr_lo & 0xFF;
1630 addr[1] = (addr_lo >> 8) & 0xFF;
1631 addr[2] = (addr_lo >> 16) & 0xFF;
1632 addr[3] = (addr_lo >> 24) & 0xFF;
1633 addr[4] = addr_hi & 0xFF;
1634 addr[5] = (addr_hi >> 8) & 0xFF;
1635
1636 if (!is_valid_ether_addr(addr)) {
1637 /* reading mac address from EEPROM or OTP */
1638 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1639 addr) == 0) ||
1640 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1641 addr) == 0)) {
1642 if (is_valid_ether_addr(addr)) {
1643 /* eeprom values are valid so use them */
1644 netif_dbg(dev, ifup, dev->net,
1645 "MAC address read from EEPROM");
1646 } else {
1647 /* generate random MAC */
1648 random_ether_addr(addr);
1649 netif_dbg(dev, ifup, dev->net,
1650 "MAC address set to random addr");
1651 }
1652
1653 addr_lo = addr[0] | (addr[1] << 8) |
1654 (addr[2] << 16) | (addr[3] << 24);
1655 addr_hi = addr[4] | (addr[5] << 8);
1656
1657 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1658 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1659 } else {
1660 /* generate random MAC */
1661 random_ether_addr(addr);
1662 netif_dbg(dev, ifup, dev->net,
1663 "MAC address set to random addr");
1664 }
1665 }
1666
1667 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1668 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1669
1670 ether_addr_copy(dev->net->dev_addr, addr);
1671}
1672
ce85e13a
WH
1673/* MDIO read and write wrappers for phylib */
1674static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1675{
1676 struct lan78xx_net *dev = bus->priv;
1677 u32 val, addr;
1678 int ret;
1679
1680 ret = usb_autopm_get_interface(dev->intf);
1681 if (ret < 0)
1682 return ret;
1683
1684 mutex_lock(&dev->phy_mutex);
1685
1686 /* confirm MII not busy */
1687 ret = lan78xx_phy_wait_not_busy(dev);
1688 if (ret < 0)
1689 goto done;
1690
1691 /* set the address, index & direction (read from PHY) */
1692 addr = mii_access(phy_id, idx, MII_READ);
1693 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1694
1695 ret = lan78xx_phy_wait_not_busy(dev);
1696 if (ret < 0)
1697 goto done;
1698
1699 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1700
1701 ret = (int)(val & 0xFFFF);
1702
1703done:
1704 mutex_unlock(&dev->phy_mutex);
1705 usb_autopm_put_interface(dev->intf);
02dc1f3d 1706
ce85e13a
WH
1707 return ret;
1708}
1709
1710static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1711 u16 regval)
1712{
1713 struct lan78xx_net *dev = bus->priv;
1714 u32 val, addr;
1715 int ret;
1716
1717 ret = usb_autopm_get_interface(dev->intf);
1718 if (ret < 0)
1719 return ret;
1720
1721 mutex_lock(&dev->phy_mutex);
1722
1723 /* confirm MII not busy */
1724 ret = lan78xx_phy_wait_not_busy(dev);
1725 if (ret < 0)
1726 goto done;
1727
1728 val = (u32)regval;
1729 ret = lan78xx_write_reg(dev, MII_DATA, val);
1730
1731 /* set the address, index & direction (write to PHY) */
1732 addr = mii_access(phy_id, idx, MII_WRITE);
1733 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1734
1735 ret = lan78xx_phy_wait_not_busy(dev);
1736 if (ret < 0)
1737 goto done;
1738
1739done:
1740 mutex_unlock(&dev->phy_mutex);
1741 usb_autopm_put_interface(dev->intf);
1742 return 0;
1743}
1744
1745static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1746{
ce85e13a 1747 int ret;
ce85e13a
WH
1748
1749 dev->mdiobus = mdiobus_alloc();
1750 if (!dev->mdiobus) {
1751 netdev_err(dev->net, "can't allocate MDIO bus\n");
1752 return -ENOMEM;
1753 }
1754
1755 dev->mdiobus->priv = (void *)dev;
1756 dev->mdiobus->read = lan78xx_mdiobus_read;
1757 dev->mdiobus->write = lan78xx_mdiobus_write;
1758 dev->mdiobus->name = "lan78xx-mdiobus";
1759
1760 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1761 dev->udev->bus->busnum, dev->udev->devnum);
1762
87177ba6
WH
1763 switch (dev->chipid) {
1764 case ID_REV_CHIP_ID_7800_:
1765 case ID_REV_CHIP_ID_7850_:
ce85e13a
WH
1766 /* set to internal PHY id */
1767 dev->mdiobus->phy_mask = ~(1 << 1);
1768 break;
02dc1f3d
WH
1769 case ID_REV_CHIP_ID_7801_:
1770 /* scan thru PHYAD[2..0] */
1771 dev->mdiobus->phy_mask = ~(0xFF);
1772 break;
ce85e13a
WH
1773 }
1774
1775 ret = mdiobus_register(dev->mdiobus);
1776 if (ret) {
1777 netdev_err(dev->net, "can't register MDIO bus\n");
e7f4dc35 1778 goto exit1;
ce85e13a
WH
1779 }
1780
1781 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1782 return 0;
ce85e13a
WH
1783exit1:
1784 mdiobus_free(dev->mdiobus);
1785 return ret;
1786}
1787
1788static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1789{
1790 mdiobus_unregister(dev->mdiobus);
ce85e13a
WH
1791 mdiobus_free(dev->mdiobus);
1792}
1793
1794static void lan78xx_link_status_change(struct net_device *net)
1795{
14437e3f
WH
1796 struct phy_device *phydev = net->phydev;
1797 int ret, temp;
1798
1799 /* At forced 100 F/H mode, chip may fail to set mode correctly
1800 * when cable is switched between long(~50+m) and short one.
1801 * As workaround, set to 10 before setting to 100
1802 * at forced 100 F/H mode.
1803 */
1804 if (!phydev->autoneg && (phydev->speed == 100)) {
1805 /* disable phy interrupt */
1806 temp = phy_read(phydev, LAN88XX_INT_MASK);
1807 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1808 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1809
1810 temp = phy_read(phydev, MII_BMCR);
1811 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1812 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1813 temp |= BMCR_SPEED100;
1814 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1815
1816 /* clear pending interrupt generated while workaround */
1817 temp = phy_read(phydev, LAN88XX_INT_STS);
1818
1819 /* enable phy interrupt back */
1820 temp = phy_read(phydev, LAN88XX_INT_MASK);
1821 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1822 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1823 }
55d7de9d
WH
1824}
1825
cc89c323
WH
1826static int irq_map(struct irq_domain *d, unsigned int irq,
1827 irq_hw_number_t hwirq)
1828{
1829 struct irq_domain_data *data = d->host_data;
1830
1831 irq_set_chip_data(irq, data);
1832 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1833 irq_set_noprobe(irq);
1834
1835 return 0;
1836}
1837
1838static void irq_unmap(struct irq_domain *d, unsigned int irq)
1839{
1840 irq_set_chip_and_handler(irq, NULL, NULL);
1841 irq_set_chip_data(irq, NULL);
1842}
1843
1844static const struct irq_domain_ops chip_domain_ops = {
1845 .map = irq_map,
1846 .unmap = irq_unmap,
1847};
1848
1849static void lan78xx_irq_mask(struct irq_data *irqd)
1850{
1851 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1852
1853 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1854}
1855
1856static void lan78xx_irq_unmask(struct irq_data *irqd)
1857{
1858 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1859
1860 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1861}
1862
1863static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1864{
1865 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1866
1867 mutex_lock(&data->irq_lock);
1868}
1869
1870static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1871{
1872 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1873 struct lan78xx_net *dev =
1874 container_of(data, struct lan78xx_net, domain_data);
1875 u32 buf;
1876 int ret;
1877
1878 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1879 * are only two callbacks executed in non-atomic contex.
1880 */
1881 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1882 if (buf != data->irqenable)
1883 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1884
1885 mutex_unlock(&data->irq_lock);
1886}
1887
1888static struct irq_chip lan78xx_irqchip = {
1889 .name = "lan78xx-irqs",
1890 .irq_mask = lan78xx_irq_mask,
1891 .irq_unmask = lan78xx_irq_unmask,
1892 .irq_bus_lock = lan78xx_irq_bus_lock,
1893 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
1894};
1895
1896static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1897{
1898 struct device_node *of_node;
1899 struct irq_domain *irqdomain;
1900 unsigned int irqmap = 0;
1901 u32 buf;
1902 int ret = 0;
1903
1904 of_node = dev->udev->dev.parent->of_node;
1905
1906 mutex_init(&dev->domain_data.irq_lock);
1907
1908 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1909 dev->domain_data.irqenable = buf;
1910
1911 dev->domain_data.irqchip = &lan78xx_irqchip;
1912 dev->domain_data.irq_handler = handle_simple_irq;
1913
1914 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1915 &chip_domain_ops, &dev->domain_data);
1916 if (irqdomain) {
1917 /* create mapping for PHY interrupt */
1918 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1919 if (!irqmap) {
1920 irq_domain_remove(irqdomain);
1921
1922 irqdomain = NULL;
1923 ret = -EINVAL;
1924 }
1925 } else {
1926 ret = -EINVAL;
1927 }
1928
1929 dev->domain_data.irqdomain = irqdomain;
1930 dev->domain_data.phyirq = irqmap;
1931
1932 return ret;
1933}
1934
1935static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1936{
1937 if (dev->domain_data.phyirq > 0) {
1938 irq_dispose_mapping(dev->domain_data.phyirq);
1939
1940 if (dev->domain_data.irqdomain)
1941 irq_domain_remove(dev->domain_data.irqdomain);
1942 }
1943 dev->domain_data.phyirq = 0;
1944 dev->domain_data.irqdomain = NULL;
1945}
1946
02dc1f3d
WH
1947static int lan8835_fixup(struct phy_device *phydev)
1948{
1949 int buf;
1950 int ret;
1951 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1952
1953 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1954 buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
1955 buf &= ~0x1800;
1956 buf |= 0x0800;
1957 phy_write_mmd_indirect(phydev, 0x8010, 3, buf);
1958
1959 /* RGMII MAC TXC Delay Enable */
1960 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1961 MAC_RGMII_ID_TXC_DELAY_EN_);
1962
1963 /* RGMII TX DLL Tune Adjust */
1964 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1965
1966 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1967
1968 return 1;
1969}
1970
1971static int ksz9031rnx_fixup(struct phy_device *phydev)
1972{
1973 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1974
1975 /* Micrel9301RNX PHY configuration */
1976 /* RGMII Control Signal Pad Skew */
1977 phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
1978 /* RGMII RX Data Pad Skew */
1979 phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
1980 /* RGMII RX Clock Pad Skew */
1981 phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);
1982
1983 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
1984
1985 return 1;
1986}
1987
55d7de9d
WH
1988static int lan78xx_phy_init(struct lan78xx_net *dev)
1989{
ce85e13a 1990 int ret;
349e0c5e 1991 u32 mii_adv;
ce85e13a 1992 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1993
ce85e13a
WH
1994 phydev = phy_find_first(dev->mdiobus);
1995 if (!phydev) {
1996 netdev_err(dev->net, "no PHY found\n");
1997 return -EIO;
1998 }
55d7de9d 1999
02dc1f3d
WH
2000 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2001 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2002 phydev->is_internal = true;
2003 dev->interface = PHY_INTERFACE_MODE_GMII;
2004
2005 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2006 if (!phydev->drv) {
2007 netdev_err(dev->net, "no PHY driver found\n");
2008 return -EIO;
2009 }
2010
2011 dev->interface = PHY_INTERFACE_MODE_RGMII;
2012
2013 /* external PHY fixup for KSZ9031RNX */
2014 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2015 ksz9031rnx_fixup);
2016 if (ret < 0) {
2017 netdev_err(dev->net, "fail to register fixup\n");
2018 return ret;
2019 }
2020 /* external PHY fixup for LAN8835 */
2021 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2022 lan8835_fixup);
2023 if (ret < 0) {
2024 netdev_err(dev->net, "fail to register fixup\n");
2025 return ret;
2026 }
2027 /* add more external PHY fixup here if needed */
2028
2029 phydev->is_internal = false;
2030 } else {
2031 netdev_err(dev->net, "unknown ID found\n");
2032 ret = -EIO;
2033 goto error;
2034 }
2035
cc89c323
WH
2036 /* if phyirq is not set, use polling mode in phylib */
2037 if (dev->domain_data.phyirq > 0)
2038 phydev->irq = dev->domain_data.phyirq;
2039 else
2040 phydev->irq = 0;
2041 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
e4953910 2042
f6e3ef3e
WH
2043 /* set to AUTOMDIX */
2044 phydev->mdix = ETH_TP_MDI_AUTO;
2045
ce85e13a
WH
2046 ret = phy_connect_direct(dev->net, phydev,
2047 lan78xx_link_status_change,
02dc1f3d 2048 dev->interface);
ce85e13a
WH
2049 if (ret) {
2050 netdev_err(dev->net, "can't attach PHY to %s\n",
2051 dev->mdiobus->id);
2052 return -EIO;
2053 }
55d7de9d 2054
ce85e13a
WH
2055 /* MAC doesn't support 1000T Half */
2056 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 2057
349e0c5e
WH
2058 /* support both flow controls */
2059 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2060 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2061 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2062 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2063
ce85e13a
WH
2064 genphy_config_aneg(phydev);
2065
349e0c5e
WH
2066 dev->fc_autoneg = phydev->autoneg;
2067
ce85e13a 2068 phy_start(phydev);
55d7de9d
WH
2069
2070 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2071
2072 return 0;
02dc1f3d
WH
2073
2074error:
2075 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2076 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2077
2078 return ret;
55d7de9d
WH
2079}
2080
2081static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2082{
2083 int ret = 0;
2084 u32 buf;
2085 bool rxenabled;
2086
2087 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2088
2089 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2090
2091 if (rxenabled) {
2092 buf &= ~MAC_RX_RXEN_;
2093 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2094 }
2095
2096 /* add 4 to size for FCS */
2097 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2098 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2099
2100 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2101
2102 if (rxenabled) {
2103 buf |= MAC_RX_RXEN_;
2104 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2105 }
2106
2107 return 0;
2108}
2109
2110static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2111{
2112 struct sk_buff *skb;
2113 unsigned long flags;
2114 int count = 0;
2115
2116 spin_lock_irqsave(&q->lock, flags);
2117 while (!skb_queue_empty(q)) {
2118 struct skb_data *entry;
2119 struct urb *urb;
2120 int ret;
2121
2122 skb_queue_walk(q, skb) {
2123 entry = (struct skb_data *)skb->cb;
2124 if (entry->state != unlink_start)
2125 goto found;
2126 }
2127 break;
2128found:
2129 entry->state = unlink_start;
2130 urb = entry->urb;
2131
2132 /* Get reference count of the URB to avoid it to be
2133 * freed during usb_unlink_urb, which may trigger
2134 * use-after-free problem inside usb_unlink_urb since
2135 * usb_unlink_urb is always racing with .complete
2136 * handler(include defer_bh).
2137 */
2138 usb_get_urb(urb);
2139 spin_unlock_irqrestore(&q->lock, flags);
2140 /* during some PM-driven resume scenarios,
2141 * these (async) unlinks complete immediately
2142 */
2143 ret = usb_unlink_urb(urb);
2144 if (ret != -EINPROGRESS && ret != 0)
2145 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2146 else
2147 count++;
2148 usb_put_urb(urb);
2149 spin_lock_irqsave(&q->lock, flags);
2150 }
2151 spin_unlock_irqrestore(&q->lock, flags);
2152 return count;
2153}
2154
2155static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2156{
2157 struct lan78xx_net *dev = netdev_priv(netdev);
2158 int ll_mtu = new_mtu + netdev->hard_header_len;
2159 int old_hard_mtu = dev->hard_mtu;
2160 int old_rx_urb_size = dev->rx_urb_size;
2161 int ret;
2162
55d7de9d
WH
2163 /* no second zero-length packet read wanted after mtu-sized packets */
2164 if ((ll_mtu % dev->maxpacket) == 0)
2165 return -EDOM;
2166
2167 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2168
2169 netdev->mtu = new_mtu;
2170
2171 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2172 if (dev->rx_urb_size == old_hard_mtu) {
2173 dev->rx_urb_size = dev->hard_mtu;
2174 if (dev->rx_urb_size > old_rx_urb_size) {
2175 if (netif_running(dev->net)) {
2176 unlink_urbs(dev, &dev->rxq);
2177 tasklet_schedule(&dev->bh);
2178 }
2179 }
2180 }
2181
2182 return 0;
2183}
2184
e0c79ff6 2185static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2186{
2187 struct lan78xx_net *dev = netdev_priv(netdev);
2188 struct sockaddr *addr = p;
2189 u32 addr_lo, addr_hi;
2190 int ret;
2191
2192 if (netif_running(netdev))
2193 return -EBUSY;
2194
2195 if (!is_valid_ether_addr(addr->sa_data))
2196 return -EADDRNOTAVAIL;
2197
2198 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2199
2200 addr_lo = netdev->dev_addr[0] |
2201 netdev->dev_addr[1] << 8 |
2202 netdev->dev_addr[2] << 16 |
2203 netdev->dev_addr[3] << 24;
2204 addr_hi = netdev->dev_addr[4] |
2205 netdev->dev_addr[5] << 8;
2206
2207 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2208 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2209
2210 return 0;
2211}
2212
2213/* Enable or disable Rx checksum offload engine */
2214static int lan78xx_set_features(struct net_device *netdev,
2215 netdev_features_t features)
2216{
2217 struct lan78xx_net *dev = netdev_priv(netdev);
2218 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2219 unsigned long flags;
2220 int ret;
2221
2222 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2223
2224 if (features & NETIF_F_RXCSUM) {
2225 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2226 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2227 } else {
2228 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2229 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2230 }
2231
2232 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2233 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2234 else
2235 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2236
2237 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2238
2239 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2240
2241 return 0;
2242}
2243
2244static void lan78xx_deferred_vlan_write(struct work_struct *param)
2245{
2246 struct lan78xx_priv *pdata =
2247 container_of(param, struct lan78xx_priv, set_vlan);
2248 struct lan78xx_net *dev = pdata->dev;
2249
2250 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2251 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2252}
2253
2254static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2255 __be16 proto, u16 vid)
2256{
2257 struct lan78xx_net *dev = netdev_priv(netdev);
2258 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2259 u16 vid_bit_index;
2260 u16 vid_dword_index;
2261
2262 vid_dword_index = (vid >> 5) & 0x7F;
2263 vid_bit_index = vid & 0x1F;
2264
2265 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2266
2267 /* defer register writes to a sleepable context */
2268 schedule_work(&pdata->set_vlan);
2269
2270 return 0;
2271}
2272
2273static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2274 __be16 proto, u16 vid)
2275{
2276 struct lan78xx_net *dev = netdev_priv(netdev);
2277 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2278 u16 vid_bit_index;
2279 u16 vid_dword_index;
2280
2281 vid_dword_index = (vid >> 5) & 0x7F;
2282 vid_bit_index = vid & 0x1F;
2283
2284 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2285
2286 /* defer register writes to a sleepable context */
2287 schedule_work(&pdata->set_vlan);
2288
2289 return 0;
2290}
2291
2292static void lan78xx_init_ltm(struct lan78xx_net *dev)
2293{
2294 int ret;
2295 u32 buf;
2296 u32 regs[6] = { 0 };
2297
2298 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2299 if (buf & USB_CFG1_LTM_ENABLE_) {
2300 u8 temp[2];
2301 /* Get values from EEPROM first */
2302 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2303 if (temp[0] == 24) {
2304 ret = lan78xx_read_raw_eeprom(dev,
2305 temp[1] * 2,
2306 24,
2307 (u8 *)regs);
2308 if (ret < 0)
2309 return;
2310 }
2311 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2312 if (temp[0] == 24) {
2313 ret = lan78xx_read_raw_otp(dev,
2314 temp[1] * 2,
2315 24,
2316 (u8 *)regs);
2317 if (ret < 0)
2318 return;
2319 }
2320 }
2321 }
2322
2323 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2324 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2325 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2326 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2327 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2328 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2329}
2330
2331static int lan78xx_reset(struct lan78xx_net *dev)
2332{
2333 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2334 u32 buf;
2335 int ret = 0;
2336 unsigned long timeout;
2337
2338 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2339 buf |= HW_CFG_LRST_;
2340 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2341
2342 timeout = jiffies + HZ;
2343 do {
2344 mdelay(1);
2345 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2346 if (time_after(jiffies, timeout)) {
2347 netdev_warn(dev->net,
2348 "timeout on completion of LiteReset");
2349 return -EIO;
2350 }
2351 } while (buf & HW_CFG_LRST_);
2352
2353 lan78xx_init_mac_address(dev);
2354
ce85e13a
WH
2355 /* save DEVID for later usage */
2356 ret = lan78xx_read_reg(dev, ID_REV, &buf);
87177ba6
WH
2357 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2358 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
ce85e13a 2359
55d7de9d
WH
2360 /* Respond to the IN token with a NAK */
2361 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2362 buf |= USB_CFG_BIR_;
2363 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2364
2365 /* Init LTM */
2366 lan78xx_init_ltm(dev);
2367
2368 dev->net->hard_header_len += TX_OVERHEAD;
2369 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2370
2371 if (dev->udev->speed == USB_SPEED_SUPER) {
2372 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2373 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2374 dev->rx_qlen = 4;
2375 dev->tx_qlen = 4;
2376 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2377 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2378 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2379 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2380 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2381 } else {
2382 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2383 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2384 dev->rx_qlen = 4;
2385 }
2386
2387 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2388 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2389
2390 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2391 buf |= HW_CFG_MEF_;
2392 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2393
2394 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2395 buf |= USB_CFG_BCE_;
2396 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2397
2398 /* set FIFO sizes */
2399 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2400 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2401
2402 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2403 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2404
2405 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2406 ret = lan78xx_write_reg(dev, FLOW, 0);
2407 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2408
2409 /* Don't need rfe_ctl_lock during initialisation */
2410 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2411 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2412 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2413
2414 /* Enable or disable checksum offload engines */
2415 lan78xx_set_features(dev->net, dev->net->features);
2416
2417 lan78xx_set_multicast(dev->net);
2418
2419 /* reset PHY */
2420 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2421 buf |= PMT_CTL_PHY_RST_;
2422 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2423
2424 timeout = jiffies + HZ;
2425 do {
2426 mdelay(1);
2427 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2428 if (time_after(jiffies, timeout)) {
2429 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2430 return -EIO;
2431 }
6c595b03 2432 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
55d7de9d 2433
55d7de9d 2434 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
02dc1f3d
WH
2435 /* LAN7801 only has RGMII mode */
2436 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2437 buf &= ~MAC_CR_GMII_EN_;
55d7de9d 2438 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
55d7de9d
WH
2439 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2440
55d7de9d
WH
2441 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2442 buf |= MAC_TX_TXEN_;
2443 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2444
2445 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2446 buf |= FCT_TX_CTL_EN_;
2447 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2448
2449 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2450
2451 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2452 buf |= MAC_RX_RXEN_;
2453 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2454
2455 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2456 buf |= FCT_RX_CTL_EN_;
2457 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2458
55d7de9d
WH
2459 return 0;
2460}
2461
20ff5565
WH
2462static void lan78xx_init_stats(struct lan78xx_net *dev)
2463{
2464 u32 *p;
2465 int i;
2466
2467 /* initialize for stats update
2468 * some counters are 20bits and some are 32bits
2469 */
2470 p = (u32 *)&dev->stats.rollover_max;
2471 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2472 p[i] = 0xFFFFF;
2473
2474 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2475 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2476 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2477 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2478 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2479 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2480 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2481 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2482 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2483 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2484
2485 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2486}
2487
55d7de9d
WH
2488static int lan78xx_open(struct net_device *net)
2489{
2490 struct lan78xx_net *dev = netdev_priv(net);
2491 int ret;
2492
2493 ret = usb_autopm_get_interface(dev->intf);
2494 if (ret < 0)
2495 goto out;
2496
2497 ret = lan78xx_reset(dev);
2498 if (ret < 0)
2499 goto done;
2500
ce85e13a
WH
2501 ret = lan78xx_phy_init(dev);
2502 if (ret < 0)
2503 goto done;
2504
55d7de9d
WH
2505 /* for Link Check */
2506 if (dev->urb_intr) {
2507 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2508 if (ret < 0) {
2509 netif_err(dev, ifup, dev->net,
2510 "intr submit %d\n", ret);
2511 goto done;
2512 }
2513 }
2514
20ff5565
WH
2515 lan78xx_init_stats(dev);
2516
55d7de9d
WH
2517 set_bit(EVENT_DEV_OPEN, &dev->flags);
2518
2519 netif_start_queue(net);
2520
2521 dev->link_on = false;
2522
2523 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2524done:
2525 usb_autopm_put_interface(dev->intf);
2526
2527out:
2528 return ret;
2529}
2530
2531static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2532{
2533 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2534 DECLARE_WAITQUEUE(wait, current);
2535 int temp;
2536
2537 /* ensure there are no more active urbs */
2538 add_wait_queue(&unlink_wakeup, &wait);
2539 set_current_state(TASK_UNINTERRUPTIBLE);
2540 dev->wait = &unlink_wakeup;
2541 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2542
2543 /* maybe wait for deletions to finish. */
2544 while (!skb_queue_empty(&dev->rxq) &&
2545 !skb_queue_empty(&dev->txq) &&
2546 !skb_queue_empty(&dev->done)) {
2547 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2548 set_current_state(TASK_UNINTERRUPTIBLE);
2549 netif_dbg(dev, ifdown, dev->net,
2550 "waited for %d urb completions\n", temp);
2551 }
2552 set_current_state(TASK_RUNNING);
2553 dev->wait = NULL;
2554 remove_wait_queue(&unlink_wakeup, &wait);
2555}
2556
e0c79ff6 2557static int lan78xx_stop(struct net_device *net)
55d7de9d
WH
2558{
2559 struct lan78xx_net *dev = netdev_priv(net);
2560
20ff5565
WH
2561 if (timer_pending(&dev->stat_monitor))
2562 del_timer_sync(&dev->stat_monitor);
2563
02dc1f3d
WH
2564 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2565 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2566
ce85e13a
WH
2567 phy_stop(net->phydev);
2568 phy_disconnect(net->phydev);
02dc1f3d 2569
ce85e13a
WH
2570 net->phydev = NULL;
2571
55d7de9d
WH
2572 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2573 netif_stop_queue(net);
2574
2575 netif_info(dev, ifdown, dev->net,
2576 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2577 net->stats.rx_packets, net->stats.tx_packets,
2578 net->stats.rx_errors, net->stats.tx_errors);
2579
2580 lan78xx_terminate_urbs(dev);
2581
2582 usb_kill_urb(dev->urb_intr);
2583
2584 skb_queue_purge(&dev->rxq_pause);
2585
2586 /* deferred work (task, timer, softirq) must also stop.
2587 * can't flush_scheduled_work() until we drop rtnl (later),
2588 * else workers could deadlock; so make workers a NOP.
2589 */
2590 dev->flags = 0;
2591 cancel_delayed_work_sync(&dev->wq);
2592 tasklet_kill(&dev->bh);
2593
2594 usb_autopm_put_interface(dev->intf);
2595
2596 return 0;
2597}
2598
2599static int lan78xx_linearize(struct sk_buff *skb)
2600{
2601 return skb_linearize(skb);
2602}
2603
2604static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2605 struct sk_buff *skb, gfp_t flags)
2606{
2607 u32 tx_cmd_a, tx_cmd_b;
2608
2609 if (skb_headroom(skb) < TX_OVERHEAD) {
2610 struct sk_buff *skb2;
2611
2612 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2613 dev_kfree_skb_any(skb);
2614 skb = skb2;
2615 if (!skb)
2616 return NULL;
2617 }
2618
2619 if (lan78xx_linearize(skb) < 0)
2620 return NULL;
2621
2622 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2623
2624 if (skb->ip_summed == CHECKSUM_PARTIAL)
2625 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2626
2627 tx_cmd_b = 0;
2628 if (skb_is_gso(skb)) {
2629 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2630
2631 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2632
2633 tx_cmd_a |= TX_CMD_A_LSO_;
2634 }
2635
2636 if (skb_vlan_tag_present(skb)) {
2637 tx_cmd_a |= TX_CMD_A_IVTG_;
2638 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2639 }
2640
2641 skb_push(skb, 4);
2642 cpu_to_le32s(&tx_cmd_b);
2643 memcpy(skb->data, &tx_cmd_b, 4);
2644
2645 skb_push(skb, 4);
2646 cpu_to_le32s(&tx_cmd_a);
2647 memcpy(skb->data, &tx_cmd_a, 4);
2648
2649 return skb;
2650}
2651
2652static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2653 struct sk_buff_head *list, enum skb_state state)
2654{
2655 unsigned long flags;
2656 enum skb_state old_state;
2657 struct skb_data *entry = (struct skb_data *)skb->cb;
2658
2659 spin_lock_irqsave(&list->lock, flags);
2660 old_state = entry->state;
2661 entry->state = state;
55d7de9d
WH
2662
2663 __skb_unlink(skb, list);
2664 spin_unlock(&list->lock);
2665 spin_lock(&dev->done.lock);
55d7de9d
WH
2666
2667 __skb_queue_tail(&dev->done, skb);
2668 if (skb_queue_len(&dev->done) == 1)
2669 tasklet_schedule(&dev->bh);
2670 spin_unlock_irqrestore(&dev->done.lock, flags);
2671
2672 return old_state;
2673}
2674
2675static void tx_complete(struct urb *urb)
2676{
2677 struct sk_buff *skb = (struct sk_buff *)urb->context;
2678 struct skb_data *entry = (struct skb_data *)skb->cb;
2679 struct lan78xx_net *dev = entry->dev;
2680
2681 if (urb->status == 0) {
74d79a2e 2682 dev->net->stats.tx_packets += entry->num_of_packet;
55d7de9d
WH
2683 dev->net->stats.tx_bytes += entry->length;
2684 } else {
2685 dev->net->stats.tx_errors++;
2686
2687 switch (urb->status) {
2688 case -EPIPE:
2689 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2690 break;
2691
2692 /* software-driven interface shutdown */
2693 case -ECONNRESET:
2694 case -ESHUTDOWN:
2695 break;
2696
2697 case -EPROTO:
2698 case -ETIME:
2699 case -EILSEQ:
2700 netif_stop_queue(dev->net);
2701 break;
2702 default:
2703 netif_dbg(dev, tx_err, dev->net,
2704 "tx err %d\n", entry->urb->status);
2705 break;
2706 }
2707 }
2708
2709 usb_autopm_put_interface_async(dev->intf);
2710
81c38e81 2711 defer_bh(dev, skb, &dev->txq, tx_done);
55d7de9d
WH
2712}
2713
2714static void lan78xx_queue_skb(struct sk_buff_head *list,
2715 struct sk_buff *newsk, enum skb_state state)
2716{
2717 struct skb_data *entry = (struct skb_data *)newsk->cb;
2718
2719 __skb_queue_tail(list, newsk);
2720 entry->state = state;
2721}
2722
e0c79ff6
BX
2723static netdev_tx_t
2724lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
55d7de9d
WH
2725{
2726 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2727 struct sk_buff *skb2 = NULL;
55d7de9d 2728
81c38e81 2729 if (skb) {
55d7de9d 2730 skb_tx_timestamp(skb);
81c38e81
WH
2731 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2732 }
55d7de9d 2733
81c38e81
WH
2734 if (skb2) {
2735 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2736
4b2a4a96
WH
2737 /* throttle TX patch at slower than SUPER SPEED USB */
2738 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2739 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2740 netif_stop_queue(net);
2741 } else {
2742 netif_dbg(dev, tx_err, dev->net,
2743 "lan78xx_tx_prep return NULL\n");
2744 dev->net->stats.tx_errors++;
2745 dev->net->stats.tx_dropped++;
2746 }
2747
2748 tasklet_schedule(&dev->bh);
2749
2750 return NETDEV_TX_OK;
2751}
2752
e0c79ff6
BX
2753static int
2754lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
55d7de9d
WH
2755{
2756 int tmp;
2757 struct usb_host_interface *alt = NULL;
2758 struct usb_host_endpoint *in = NULL, *out = NULL;
2759 struct usb_host_endpoint *status = NULL;
2760
2761 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2762 unsigned ep;
2763
2764 in = NULL;
2765 out = NULL;
2766 status = NULL;
2767 alt = intf->altsetting + tmp;
2768
2769 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2770 struct usb_host_endpoint *e;
2771 int intr = 0;
2772
2773 e = alt->endpoint + ep;
2774 switch (e->desc.bmAttributes) {
2775 case USB_ENDPOINT_XFER_INT:
2776 if (!usb_endpoint_dir_in(&e->desc))
2777 continue;
2778 intr = 1;
2779 /* FALLTHROUGH */
2780 case USB_ENDPOINT_XFER_BULK:
2781 break;
2782 default:
2783 continue;
2784 }
2785 if (usb_endpoint_dir_in(&e->desc)) {
2786 if (!intr && !in)
2787 in = e;
2788 else if (intr && !status)
2789 status = e;
2790 } else {
2791 if (!out)
2792 out = e;
2793 }
2794 }
2795 if (in && out)
2796 break;
2797 }
2798 if (!alt || !in || !out)
2799 return -EINVAL;
2800
2801 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2802 in->desc.bEndpointAddress &
2803 USB_ENDPOINT_NUMBER_MASK);
2804 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2805 out->desc.bEndpointAddress &
2806 USB_ENDPOINT_NUMBER_MASK);
2807 dev->ep_intr = status;
2808
2809 return 0;
2810}
2811
2812static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2813{
2814 struct lan78xx_priv *pdata = NULL;
2815 int ret;
2816 int i;
2817
2818 ret = lan78xx_get_endpoints(dev, intf);
2819
2820 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2821
2822 pdata = (struct lan78xx_priv *)(dev->data[0]);
2823 if (!pdata) {
2824 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2825 return -ENOMEM;
2826 }
2827
2828 pdata->dev = dev;
2829
2830 spin_lock_init(&pdata->rfe_ctl_lock);
2831 mutex_init(&pdata->dataport_mutex);
2832
2833 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2834
2835 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2836 pdata->vlan_table[i] = 0;
2837
2838 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2839
2840 dev->net->features = 0;
2841
2842 if (DEFAULT_TX_CSUM_ENABLE)
2843 dev->net->features |= NETIF_F_HW_CSUM;
2844
2845 if (DEFAULT_RX_CSUM_ENABLE)
2846 dev->net->features |= NETIF_F_RXCSUM;
2847
2848 if (DEFAULT_TSO_CSUM_ENABLE)
2849 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2850
2851 dev->net->hw_features = dev->net->features;
2852
cc89c323
WH
2853 ret = lan78xx_setup_irq_domain(dev);
2854 if (ret < 0) {
2855 netdev_warn(dev->net,
2856 "lan78xx_setup_irq_domain() failed : %d", ret);
2857 kfree(pdata);
2858 return ret;
2859 }
2860
55d7de9d
WH
2861 /* Init all registers */
2862 ret = lan78xx_reset(dev);
2863
ce85e13a
WH
2864 lan78xx_mdio_init(dev);
2865
55d7de9d
WH
2866 dev->net->flags |= IFF_MULTICAST;
2867
2868 pdata->wol = WAKE_MAGIC;
2869
2870 return 0;
2871}
2872
2873static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2874{
2875 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2876
cc89c323
WH
2877 lan78xx_remove_irq_domain(dev);
2878
ce85e13a
WH
2879 lan78xx_remove_mdio(dev);
2880
55d7de9d
WH
2881 if (pdata) {
2882 netif_dbg(dev, ifdown, dev->net, "free pdata");
2883 kfree(pdata);
2884 pdata = NULL;
2885 dev->data[0] = 0;
2886 }
2887}
2888
2889static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2890 struct sk_buff *skb,
2891 u32 rx_cmd_a, u32 rx_cmd_b)
2892{
2893 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2894 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2895 skb->ip_summed = CHECKSUM_NONE;
2896 } else {
2897 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2898 skb->ip_summed = CHECKSUM_COMPLETE;
2899 }
2900}
2901
e0c79ff6 2902static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
55d7de9d
WH
2903{
2904 int status;
2905
2906 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2907 skb_queue_tail(&dev->rxq_pause, skb);
2908 return;
2909 }
2910
55d7de9d
WH
2911 dev->net->stats.rx_packets++;
2912 dev->net->stats.rx_bytes += skb->len;
2913
74d79a2e
WH
2914 skb->protocol = eth_type_trans(skb, dev->net);
2915
55d7de9d
WH
2916 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2917 skb->len + sizeof(struct ethhdr), skb->protocol);
2918 memset(skb->cb, 0, sizeof(struct skb_data));
2919
2920 if (skb_defer_rx_timestamp(skb))
2921 return;
2922
2923 status = netif_rx(skb);
2924 if (status != NET_RX_SUCCESS)
2925 netif_dbg(dev, rx_err, dev->net,
2926 "netif_rx status %d\n", status);
2927}
2928
2929static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2930{
2931 if (skb->len < dev->net->hard_header_len)
2932 return 0;
2933
2934 while (skb->len > 0) {
2935 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2936 u16 rx_cmd_c;
2937 struct sk_buff *skb2;
2938 unsigned char *packet;
2939
2940 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2941 le32_to_cpus(&rx_cmd_a);
2942 skb_pull(skb, sizeof(rx_cmd_a));
2943
2944 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2945 le32_to_cpus(&rx_cmd_b);
2946 skb_pull(skb, sizeof(rx_cmd_b));
2947
2948 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2949 le16_to_cpus(&rx_cmd_c);
2950 skb_pull(skb, sizeof(rx_cmd_c));
2951
2952 packet = skb->data;
2953
2954 /* get the packet length */
2955 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2956 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2957
2958 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2959 netif_dbg(dev, rx_err, dev->net,
2960 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2961 } else {
2962 /* last frame in this batch */
2963 if (skb->len == size) {
2964 lan78xx_rx_csum_offload(dev, skb,
2965 rx_cmd_a, rx_cmd_b);
2966
2967 skb_trim(skb, skb->len - 4); /* remove fcs */
2968 skb->truesize = size + sizeof(struct sk_buff);
2969
2970 return 1;
2971 }
2972
2973 skb2 = skb_clone(skb, GFP_ATOMIC);
2974 if (unlikely(!skb2)) {
2975 netdev_warn(dev->net, "Error allocating skb");
2976 return 0;
2977 }
2978
2979 skb2->len = size;
2980 skb2->data = packet;
2981 skb_set_tail_pointer(skb2, size);
2982
2983 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2984
2985 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2986 skb2->truesize = size + sizeof(struct sk_buff);
2987
2988 lan78xx_skb_return(dev, skb2);
2989 }
2990
2991 skb_pull(skb, size);
2992
2993 /* padding bytes before the next frame starts */
2994 if (skb->len)
2995 skb_pull(skb, align_count);
2996 }
2997
55d7de9d
WH
2998 return 1;
2999}
3000
3001static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3002{
3003 if (!lan78xx_rx(dev, skb)) {
3004 dev->net->stats.rx_errors++;
3005 goto done;
3006 }
3007
3008 if (skb->len) {
3009 lan78xx_skb_return(dev, skb);
3010 return;
3011 }
3012
3013 netif_dbg(dev, rx_err, dev->net, "drop\n");
3014 dev->net->stats.rx_errors++;
3015done:
3016 skb_queue_tail(&dev->done, skb);
3017}
3018
3019static void rx_complete(struct urb *urb);
3020
3021static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3022{
3023 struct sk_buff *skb;
3024 struct skb_data *entry;
3025 unsigned long lockflags;
3026 size_t size = dev->rx_urb_size;
3027 int ret = 0;
3028
3029 skb = netdev_alloc_skb_ip_align(dev->net, size);
3030 if (!skb) {
3031 usb_free_urb(urb);
3032 return -ENOMEM;
3033 }
3034
3035 entry = (struct skb_data *)skb->cb;
3036 entry->urb = urb;
3037 entry->dev = dev;
3038 entry->length = 0;
3039
3040 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3041 skb->data, size, rx_complete, skb);
3042
3043 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3044
3045 if (netif_device_present(dev->net) &&
3046 netif_running(dev->net) &&
3047 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3048 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3049 ret = usb_submit_urb(urb, GFP_ATOMIC);
3050 switch (ret) {
3051 case 0:
3052 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3053 break;
3054 case -EPIPE:
3055 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3056 break;
3057 case -ENODEV:
3058 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3059 netif_device_detach(dev->net);
3060 break;
3061 case -EHOSTUNREACH:
3062 ret = -ENOLINK;
3063 break;
3064 default:
3065 netif_dbg(dev, rx_err, dev->net,
3066 "rx submit, %d\n", ret);
3067 tasklet_schedule(&dev->bh);
3068 }
3069 } else {
3070 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3071 ret = -ENOLINK;
3072 }
3073 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3074 if (ret) {
3075 dev_kfree_skb_any(skb);
3076 usb_free_urb(urb);
3077 }
3078 return ret;
3079}
3080
3081static void rx_complete(struct urb *urb)
3082{
3083 struct sk_buff *skb = (struct sk_buff *)urb->context;
3084 struct skb_data *entry = (struct skb_data *)skb->cb;
3085 struct lan78xx_net *dev = entry->dev;
3086 int urb_status = urb->status;
3087 enum skb_state state;
3088
3089 skb_put(skb, urb->actual_length);
3090 state = rx_done;
3091 entry->urb = NULL;
3092
3093 switch (urb_status) {
3094 case 0:
3095 if (skb->len < dev->net->hard_header_len) {
3096 state = rx_cleanup;
3097 dev->net->stats.rx_errors++;
3098 dev->net->stats.rx_length_errors++;
3099 netif_dbg(dev, rx_err, dev->net,
3100 "rx length %d\n", skb->len);
3101 }
3102 usb_mark_last_busy(dev->udev);
3103 break;
3104 case -EPIPE:
3105 dev->net->stats.rx_errors++;
3106 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3107 /* FALLTHROUGH */
3108 case -ECONNRESET: /* async unlink */
3109 case -ESHUTDOWN: /* hardware gone */
3110 netif_dbg(dev, ifdown, dev->net,
3111 "rx shutdown, code %d\n", urb_status);
3112 state = rx_cleanup;
3113 entry->urb = urb;
3114 urb = NULL;
3115 break;
3116 case -EPROTO:
3117 case -ETIME:
3118 case -EILSEQ:
3119 dev->net->stats.rx_errors++;
3120 state = rx_cleanup;
3121 entry->urb = urb;
3122 urb = NULL;
3123 break;
3124
3125 /* data overrun ... flush fifo? */
3126 case -EOVERFLOW:
3127 dev->net->stats.rx_over_errors++;
3128 /* FALLTHROUGH */
3129
3130 default:
3131 state = rx_cleanup;
3132 dev->net->stats.rx_errors++;
3133 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3134 break;
3135 }
3136
3137 state = defer_bh(dev, skb, &dev->rxq, state);
3138
3139 if (urb) {
3140 if (netif_running(dev->net) &&
3141 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3142 state != unlink_start) {
3143 rx_submit(dev, urb, GFP_ATOMIC);
3144 return;
3145 }
3146 usb_free_urb(urb);
3147 }
3148 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3149}
3150
3151static void lan78xx_tx_bh(struct lan78xx_net *dev)
3152{
3153 int length;
3154 struct urb *urb = NULL;
3155 struct skb_data *entry;
3156 unsigned long flags;
3157 struct sk_buff_head *tqp = &dev->txq_pend;
3158 struct sk_buff *skb, *skb2;
3159 int ret;
3160 int count, pos;
3161 int skb_totallen, pkt_cnt;
3162
3163 skb_totallen = 0;
3164 pkt_cnt = 0;
74d79a2e
WH
3165 count = 0;
3166 length = 0;
55d7de9d
WH
3167 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3168 if (skb_is_gso(skb)) {
3169 if (pkt_cnt) {
3170 /* handle previous packets first */
3171 break;
3172 }
74d79a2e
WH
3173 count = 1;
3174 length = skb->len - TX_OVERHEAD;
55d7de9d
WH
3175 skb2 = skb_dequeue(tqp);
3176 goto gso_skb;
3177 }
3178
3179 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3180 break;
3181 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3182 pkt_cnt++;
3183 }
3184
3185 /* copy to a single skb */
3186 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3187 if (!skb)
3188 goto drop;
3189
3190 skb_put(skb, skb_totallen);
3191
3192 for (count = pos = 0; count < pkt_cnt; count++) {
3193 skb2 = skb_dequeue(tqp);
3194 if (skb2) {
74d79a2e 3195 length += (skb2->len - TX_OVERHEAD);
55d7de9d
WH
3196 memcpy(skb->data + pos, skb2->data, skb2->len);
3197 pos += roundup(skb2->len, sizeof(u32));
3198 dev_kfree_skb(skb2);
55d7de9d
WH
3199 }
3200 }
3201
55d7de9d
WH
3202gso_skb:
3203 urb = usb_alloc_urb(0, GFP_ATOMIC);
d7c4e84e 3204 if (!urb)
55d7de9d 3205 goto drop;
55d7de9d
WH
3206
3207 entry = (struct skb_data *)skb->cb;
3208 entry->urb = urb;
3209 entry->dev = dev;
3210 entry->length = length;
74d79a2e 3211 entry->num_of_packet = count;
55d7de9d
WH
3212
3213 spin_lock_irqsave(&dev->txq.lock, flags);
3214 ret = usb_autopm_get_interface_async(dev->intf);
3215 if (ret < 0) {
3216 spin_unlock_irqrestore(&dev->txq.lock, flags);
3217 goto drop;
3218 }
3219
3220 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3221 skb->data, skb->len, tx_complete, skb);
3222
3223 if (length % dev->maxpacket == 0) {
3224 /* send USB_ZERO_PACKET */
3225 urb->transfer_flags |= URB_ZERO_PACKET;
3226 }
3227
3228#ifdef CONFIG_PM
3229 /* if this triggers the device is still a sleep */
3230 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3231 /* transmission will be done in resume */
3232 usb_anchor_urb(urb, &dev->deferred);
3233 /* no use to process more packets */
3234 netif_stop_queue(dev->net);
3235 usb_put_urb(urb);
3236 spin_unlock_irqrestore(&dev->txq.lock, flags);
3237 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3238 return;
3239 }
3240#endif
3241
3242 ret = usb_submit_urb(urb, GFP_ATOMIC);
3243 switch (ret) {
3244 case 0:
860e9538 3245 netif_trans_update(dev->net);
55d7de9d
WH
3246 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3247 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3248 netif_stop_queue(dev->net);
3249 break;
3250 case -EPIPE:
3251 netif_stop_queue(dev->net);
3252 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3253 usb_autopm_put_interface_async(dev->intf);
3254 break;
3255 default:
3256 usb_autopm_put_interface_async(dev->intf);
3257 netif_dbg(dev, tx_err, dev->net,
3258 "tx: submit urb err %d\n", ret);
3259 break;
3260 }
3261
3262 spin_unlock_irqrestore(&dev->txq.lock, flags);
3263
3264 if (ret) {
3265 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3266drop:
3267 dev->net->stats.tx_dropped++;
3268 if (skb)
3269 dev_kfree_skb_any(skb);
3270 usb_free_urb(urb);
3271 } else
3272 netif_dbg(dev, tx_queued, dev->net,
3273 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3274}
3275
3276static void lan78xx_rx_bh(struct lan78xx_net *dev)
3277{
3278 struct urb *urb;
3279 int i;
3280
3281 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3282 for (i = 0; i < 10; i++) {
3283 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3284 break;
3285 urb = usb_alloc_urb(0, GFP_ATOMIC);
3286 if (urb)
3287 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3288 return;
3289 }
3290
3291 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3292 tasklet_schedule(&dev->bh);
3293 }
3294 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3295 netif_wake_queue(dev->net);
3296}
3297
3298static void lan78xx_bh(unsigned long param)
3299{
3300 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3301 struct sk_buff *skb;
3302 struct skb_data *entry;
3303
55d7de9d
WH
3304 while ((skb = skb_dequeue(&dev->done))) {
3305 entry = (struct skb_data *)(skb->cb);
3306 switch (entry->state) {
3307 case rx_done:
3308 entry->state = rx_cleanup;
3309 rx_process(dev, skb);
3310 continue;
3311 case tx_done:
3312 usb_free_urb(entry->urb);
3313 dev_kfree_skb(skb);
3314 continue;
3315 case rx_cleanup:
3316 usb_free_urb(entry->urb);
3317 dev_kfree_skb(skb);
3318 continue;
3319 default:
3320 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3321 return;
3322 }
55d7de9d
WH
3323 }
3324
3325 if (netif_device_present(dev->net) && netif_running(dev->net)) {
20ff5565
WH
3326 /* reset update timer delta */
3327 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3328 dev->delta = 1;
3329 mod_timer(&dev->stat_monitor,
3330 jiffies + STAT_UPDATE_TIMER);
3331 }
3332
55d7de9d
WH
3333 if (!skb_queue_empty(&dev->txq_pend))
3334 lan78xx_tx_bh(dev);
3335
3336 if (!timer_pending(&dev->delay) &&
3337 !test_bit(EVENT_RX_HALT, &dev->flags))
3338 lan78xx_rx_bh(dev);
3339 }
3340}
3341
3342static void lan78xx_delayedwork(struct work_struct *work)
3343{
3344 int status;
3345 struct lan78xx_net *dev;
3346
3347 dev = container_of(work, struct lan78xx_net, wq.work);
3348
3349 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3350 unlink_urbs(dev, &dev->txq);
3351 status = usb_autopm_get_interface(dev->intf);
3352 if (status < 0)
3353 goto fail_pipe;
3354 status = usb_clear_halt(dev->udev, dev->pipe_out);
3355 usb_autopm_put_interface(dev->intf);
3356 if (status < 0 &&
3357 status != -EPIPE &&
3358 status != -ESHUTDOWN) {
3359 if (netif_msg_tx_err(dev))
3360fail_pipe:
3361 netdev_err(dev->net,
3362 "can't clear tx halt, status %d\n",
3363 status);
3364 } else {
3365 clear_bit(EVENT_TX_HALT, &dev->flags);
3366 if (status != -ESHUTDOWN)
3367 netif_wake_queue(dev->net);
3368 }
3369 }
3370 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3371 unlink_urbs(dev, &dev->rxq);
3372 status = usb_autopm_get_interface(dev->intf);
3373 if (status < 0)
3374 goto fail_halt;
3375 status = usb_clear_halt(dev->udev, dev->pipe_in);
3376 usb_autopm_put_interface(dev->intf);
3377 if (status < 0 &&
3378 status != -EPIPE &&
3379 status != -ESHUTDOWN) {
3380 if (netif_msg_rx_err(dev))
3381fail_halt:
3382 netdev_err(dev->net,
3383 "can't clear rx halt, status %d\n",
3384 status);
3385 } else {
3386 clear_bit(EVENT_RX_HALT, &dev->flags);
3387 tasklet_schedule(&dev->bh);
3388 }
3389 }
3390
3391 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3392 int ret = 0;
3393
3394 clear_bit(EVENT_LINK_RESET, &dev->flags);
3395 status = usb_autopm_get_interface(dev->intf);
3396 if (status < 0)
3397 goto skip_reset;
3398 if (lan78xx_link_reset(dev) < 0) {
3399 usb_autopm_put_interface(dev->intf);
3400skip_reset:
3401 netdev_info(dev->net, "link reset failed (%d)\n",
3402 ret);
3403 } else {
3404 usb_autopm_put_interface(dev->intf);
3405 }
3406 }
20ff5565
WH
3407
3408 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3409 lan78xx_update_stats(dev);
3410
3411 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3412
3413 mod_timer(&dev->stat_monitor,
3414 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3415
3416 dev->delta = min((dev->delta * 2), 50);
3417 }
55d7de9d
WH
3418}
3419
3420static void intr_complete(struct urb *urb)
3421{
3422 struct lan78xx_net *dev = urb->context;
3423 int status = urb->status;
3424
3425 switch (status) {
3426 /* success */
3427 case 0:
3428 lan78xx_status(dev, urb);
3429 break;
3430
3431 /* software-driven interface shutdown */
3432 case -ENOENT: /* urb killed */
3433 case -ESHUTDOWN: /* hardware gone */
3434 netif_dbg(dev, ifdown, dev->net,
3435 "intr shutdown, code %d\n", status);
3436 return;
3437
3438 /* NOTE: not throttling like RX/TX, since this endpoint
3439 * already polls infrequently
3440 */
3441 default:
3442 netdev_dbg(dev->net, "intr status %d\n", status);
3443 break;
3444 }
3445
3446 if (!netif_running(dev->net))
3447 return;
3448
3449 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3450 status = usb_submit_urb(urb, GFP_ATOMIC);
3451 if (status != 0)
3452 netif_err(dev, timer, dev->net,
3453 "intr resubmit --> %d\n", status);
3454}
3455
3456static void lan78xx_disconnect(struct usb_interface *intf)
3457{
3458 struct lan78xx_net *dev;
3459 struct usb_device *udev;
3460 struct net_device *net;
3461
3462 dev = usb_get_intfdata(intf);
3463 usb_set_intfdata(intf, NULL);
3464 if (!dev)
3465 return;
3466
3467 udev = interface_to_usbdev(intf);
3468
3469 net = dev->net;
3470 unregister_netdev(net);
3471
3472 cancel_delayed_work_sync(&dev->wq);
3473
3474 usb_scuttle_anchored_urbs(&dev->deferred);
3475
3476 lan78xx_unbind(dev, intf);
3477
3478 usb_kill_urb(dev->urb_intr);
3479 usb_free_urb(dev->urb_intr);
3480
3481 free_netdev(net);
3482 usb_put_dev(udev);
3483}
3484
e0c79ff6 3485static void lan78xx_tx_timeout(struct net_device *net)
55d7de9d
WH
3486{
3487 struct lan78xx_net *dev = netdev_priv(net);
3488
3489 unlink_urbs(dev, &dev->txq);
3490 tasklet_schedule(&dev->bh);
3491}
3492
3493static const struct net_device_ops lan78xx_netdev_ops = {
3494 .ndo_open = lan78xx_open,
3495 .ndo_stop = lan78xx_stop,
3496 .ndo_start_xmit = lan78xx_start_xmit,
3497 .ndo_tx_timeout = lan78xx_tx_timeout,
3498 .ndo_change_mtu = lan78xx_change_mtu,
3499 .ndo_set_mac_address = lan78xx_set_mac_addr,
3500 .ndo_validate_addr = eth_validate_addr,
3501 .ndo_do_ioctl = lan78xx_ioctl,
3502 .ndo_set_rx_mode = lan78xx_set_multicast,
3503 .ndo_set_features = lan78xx_set_features,
3504 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3505 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3506};
3507
20ff5565
WH
3508static void lan78xx_stat_monitor(unsigned long param)
3509{
3510 struct lan78xx_net *dev;
3511
3512 dev = (struct lan78xx_net *)param;
3513
3514 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3515}
3516
55d7de9d
WH
3517static int lan78xx_probe(struct usb_interface *intf,
3518 const struct usb_device_id *id)
3519{
3520 struct lan78xx_net *dev;
3521 struct net_device *netdev;
3522 struct usb_device *udev;
3523 int ret;
3524 unsigned maxp;
3525 unsigned period;
3526 u8 *buf = NULL;
3527
3528 udev = interface_to_usbdev(intf);
3529 udev = usb_get_dev(udev);
3530
3531 ret = -ENOMEM;
3532 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3533 if (!netdev) {
3534 dev_err(&intf->dev, "Error: OOM\n");
3535 goto out1;
3536 }
3537
3538 /* netdev_printk() needs this */
3539 SET_NETDEV_DEV(netdev, &intf->dev);
3540
3541 dev = netdev_priv(netdev);
3542 dev->udev = udev;
3543 dev->intf = intf;
3544 dev->net = netdev;
3545 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3546 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3547
3548 skb_queue_head_init(&dev->rxq);
3549 skb_queue_head_init(&dev->txq);
3550 skb_queue_head_init(&dev->done);
3551 skb_queue_head_init(&dev->rxq_pause);
3552 skb_queue_head_init(&dev->txq_pend);
3553 mutex_init(&dev->phy_mutex);
3554
3555 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3556 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3557 init_usb_anchor(&dev->deferred);
3558
3559 netdev->netdev_ops = &lan78xx_netdev_ops;
3560 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3561 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3562
20ff5565
WH
3563 dev->stat_monitor.function = lan78xx_stat_monitor;
3564 dev->stat_monitor.data = (unsigned long)dev;
3565 dev->delta = 1;
3566 init_timer(&dev->stat_monitor);
3567
3568 mutex_init(&dev->stats.access_lock);
3569
55d7de9d
WH
3570 ret = lan78xx_bind(dev, intf);
3571 if (ret < 0)
3572 goto out2;
3573 strcpy(netdev->name, "eth%d");
3574
3575 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3576 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3577
f77f0aee
JW
3578 /* MTU range: 68 - 9000 */
3579 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3580
55d7de9d
WH
3581 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3582 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3583 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3584
3585 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3586 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3587
3588 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3589 dev->ep_intr->desc.bEndpointAddress &
3590 USB_ENDPOINT_NUMBER_MASK);
3591 period = dev->ep_intr->desc.bInterval;
3592
3593 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3594 buf = kmalloc(maxp, GFP_KERNEL);
3595 if (buf) {
3596 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3597 if (!dev->urb_intr) {
51920830 3598 ret = -ENOMEM;
55d7de9d
WH
3599 kfree(buf);
3600 goto out3;
3601 } else {
3602 usb_fill_int_urb(dev->urb_intr, dev->udev,
3603 dev->pipe_intr, buf, maxp,
3604 intr_complete, dev, period);
3605 }
3606 }
3607
3608 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3609
3610 /* driver requires remote-wakeup capability during autosuspend. */
3611 intf->needs_remote_wakeup = 1;
3612
3613 ret = register_netdev(netdev);
3614 if (ret != 0) {
3615 netif_err(dev, probe, netdev, "couldn't register the device\n");
3616 goto out2;
3617 }
3618
3619 usb_set_intfdata(intf, dev);
3620
3621 ret = device_set_wakeup_enable(&udev->dev, true);
3622
3623 /* Default delay of 2sec has more overhead than advantage.
3624 * Set to 10sec as default.
3625 */
3626 pm_runtime_set_autosuspend_delay(&udev->dev,
3627 DEFAULT_AUTOSUSPEND_DELAY);
3628
3629 return 0;
3630
55d7de9d
WH
3631out3:
3632 lan78xx_unbind(dev, intf);
3633out2:
3634 free_netdev(netdev);
3635out1:
3636 usb_put_dev(udev);
3637
3638 return ret;
3639}
3640
3641static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3642{
3643 const u16 crc16poly = 0x8005;
3644 int i;
3645 u16 bit, crc, msb;
3646 u8 data;
3647
3648 crc = 0xFFFF;
3649 for (i = 0; i < len; i++) {
3650 data = *buf++;
3651 for (bit = 0; bit < 8; bit++) {
3652 msb = crc >> 15;
3653 crc <<= 1;
3654
3655 if (msb ^ (u16)(data & 1)) {
3656 crc ^= crc16poly;
3657 crc |= (u16)0x0001U;
3658 }
3659 data >>= 1;
3660 }
3661 }
3662
3663 return crc;
3664}
3665
3666static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3667{
3668 u32 buf;
3669 int ret;
3670 int mask_index;
3671 u16 crc;
3672 u32 temp_wucsr;
3673 u32 temp_pmt_ctl;
3674 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3675 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3676 const u8 arp_type[2] = { 0x08, 0x06 };
3677
3678 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3679 buf &= ~MAC_TX_TXEN_;
3680 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3681 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3682 buf &= ~MAC_RX_RXEN_;
3683 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3684
3685 ret = lan78xx_write_reg(dev, WUCSR, 0);
3686 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3687 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3688
3689 temp_wucsr = 0;
3690
3691 temp_pmt_ctl = 0;
3692 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3693 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3694 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3695
3696 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3697 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3698
3699 mask_index = 0;
3700 if (wol & WAKE_PHY) {
3701 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3702
3703 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3704 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3705 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3706 }
3707 if (wol & WAKE_MAGIC) {
3708 temp_wucsr |= WUCSR_MPEN_;
3709
3710 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3711 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3712 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3713 }
3714 if (wol & WAKE_BCAST) {
3715 temp_wucsr |= WUCSR_BCST_EN_;
3716
3717 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3718 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3719 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3720 }
3721 if (wol & WAKE_MCAST) {
3722 temp_wucsr |= WUCSR_WAKE_EN_;
3723
3724 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3725 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3726 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3727 WUF_CFGX_EN_ |
3728 WUF_CFGX_TYPE_MCAST_ |
3729 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3730 (crc & WUF_CFGX_CRC16_MASK_));
3731
3732 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3733 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3734 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3735 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3736 mask_index++;
3737
3738 /* for IPv6 Multicast */
3739 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3740 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3741 WUF_CFGX_EN_ |
3742 WUF_CFGX_TYPE_MCAST_ |
3743 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3744 (crc & WUF_CFGX_CRC16_MASK_));
3745
3746 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3747 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3748 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3749 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3750 mask_index++;
3751
3752 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3753 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3754 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3755 }
3756 if (wol & WAKE_UCAST) {
3757 temp_wucsr |= WUCSR_PFDA_EN_;
3758
3759 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3760 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3761 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3762 }
3763 if (wol & WAKE_ARP) {
3764 temp_wucsr |= WUCSR_WAKE_EN_;
3765
3766 /* set WUF_CFG & WUF_MASK
3767 * for packettype (offset 12,13) = ARP (0x0806)
3768 */
3769 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3770 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3771 WUF_CFGX_EN_ |
3772 WUF_CFGX_TYPE_ALL_ |
3773 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3774 (crc & WUF_CFGX_CRC16_MASK_));
3775
3776 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3777 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3778 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3779 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3780 mask_index++;
3781
3782 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3783 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3784 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3785 }
3786
3787 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3788
3789 /* when multiple WOL bits are set */
3790 if (hweight_long((unsigned long)wol) > 1) {
3791 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3792 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3793 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3794 }
3795 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3796
3797 /* clear WUPS */
3798 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3799 buf |= PMT_CTL_WUPS_MASK_;
3800 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3801
3802 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3803 buf |= MAC_RX_RXEN_;
3804 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3805
3806 return 0;
3807}
3808
e0c79ff6 3809static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
55d7de9d
WH
3810{
3811 struct lan78xx_net *dev = usb_get_intfdata(intf);
3812 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3813 u32 buf;
3814 int ret;
3815 int event;
3816
55d7de9d
WH
3817 event = message.event;
3818
3819 if (!dev->suspend_count++) {
3820 spin_lock_irq(&dev->txq.lock);
3821 /* don't autosuspend while transmitting */
3822 if ((skb_queue_len(&dev->txq) ||
3823 skb_queue_len(&dev->txq_pend)) &&
3824 PMSG_IS_AUTO(message)) {
3825 spin_unlock_irq(&dev->txq.lock);
3826 ret = -EBUSY;
3827 goto out;
3828 } else {
3829 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3830 spin_unlock_irq(&dev->txq.lock);
3831 }
3832
3833 /* stop TX & RX */
3834 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3835 buf &= ~MAC_TX_TXEN_;
3836 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3837 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3838 buf &= ~MAC_RX_RXEN_;
3839 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3840
3841 /* empty out the rx and queues */
3842 netif_device_detach(dev->net);
3843 lan78xx_terminate_urbs(dev);
3844 usb_kill_urb(dev->urb_intr);
3845
3846 /* reattach */
3847 netif_device_attach(dev->net);
3848 }
3849
3850 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
20ff5565
WH
3851 del_timer(&dev->stat_monitor);
3852
55d7de9d
WH
3853 if (PMSG_IS_AUTO(message)) {
3854 /* auto suspend (selective suspend) */
3855 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3856 buf &= ~MAC_TX_TXEN_;
3857 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3858 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3859 buf &= ~MAC_RX_RXEN_;
3860 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3861
3862 ret = lan78xx_write_reg(dev, WUCSR, 0);
3863 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3864 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3865
3866 /* set goodframe wakeup */
3867 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3868
3869 buf |= WUCSR_RFE_WAKE_EN_;
3870 buf |= WUCSR_STORE_WAKE_;
3871
3872 ret = lan78xx_write_reg(dev, WUCSR, buf);
3873
3874 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3875
3876 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3877 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3878
3879 buf |= PMT_CTL_PHY_WAKE_EN_;
3880 buf |= PMT_CTL_WOL_EN_;
3881 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3882 buf |= PMT_CTL_SUS_MODE_3_;
3883
3884 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3885
3886 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3887
3888 buf |= PMT_CTL_WUPS_MASK_;
3889
3890 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3891
3892 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3893 buf |= MAC_RX_RXEN_;
3894 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3895 } else {
3896 lan78xx_set_suspend(dev, pdata->wol);
3897 }
3898 }
3899
49d28b56 3900 ret = 0;
55d7de9d
WH
3901out:
3902 return ret;
3903}
3904
e0c79ff6 3905static int lan78xx_resume(struct usb_interface *intf)
55d7de9d
WH
3906{
3907 struct lan78xx_net *dev = usb_get_intfdata(intf);
3908 struct sk_buff *skb;
3909 struct urb *res;
3910 int ret;
3911 u32 buf;
3912
20ff5565
WH
3913 if (!timer_pending(&dev->stat_monitor)) {
3914 dev->delta = 1;
3915 mod_timer(&dev->stat_monitor,
3916 jiffies + STAT_UPDATE_TIMER);
3917 }
3918
55d7de9d
WH
3919 if (!--dev->suspend_count) {
3920 /* resume interrupt URBs */
3921 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3922 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3923
3924 spin_lock_irq(&dev->txq.lock);
3925 while ((res = usb_get_from_anchor(&dev->deferred))) {
3926 skb = (struct sk_buff *)res->context;
3927 ret = usb_submit_urb(res, GFP_ATOMIC);
3928 if (ret < 0) {
3929 dev_kfree_skb_any(skb);
3930 usb_free_urb(res);
3931 usb_autopm_put_interface_async(dev->intf);
3932 } else {
860e9538 3933 netif_trans_update(dev->net);
55d7de9d
WH
3934 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3935 }
3936 }
3937
3938 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3939 spin_unlock_irq(&dev->txq.lock);
3940
3941 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3942 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3943 netif_start_queue(dev->net);
3944 tasklet_schedule(&dev->bh);
3945 }
3946 }
3947
3948 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3949 ret = lan78xx_write_reg(dev, WUCSR, 0);
3950 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3951
3952 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3953 WUCSR2_ARP_RCD_ |
3954 WUCSR2_IPV6_TCPSYN_RCD_ |
3955 WUCSR2_IPV4_TCPSYN_RCD_);
3956
3957 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3958 WUCSR_EEE_RX_WAKE_ |
3959 WUCSR_PFDA_FR_ |
3960 WUCSR_RFE_WAKE_FR_ |
3961 WUCSR_WUFR_ |
3962 WUCSR_MPR_ |
3963 WUCSR_BCST_FR_);
3964
3965 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3966 buf |= MAC_TX_TXEN_;
3967 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3968
3969 return 0;
3970}
3971
e0c79ff6 3972static int lan78xx_reset_resume(struct usb_interface *intf)
55d7de9d
WH
3973{
3974 struct lan78xx_net *dev = usb_get_intfdata(intf);
3975
3976 lan78xx_reset(dev);
ce85e13a
WH
3977
3978 lan78xx_phy_init(dev);
3979
55d7de9d
WH
3980 return lan78xx_resume(intf);
3981}
3982
3983static const struct usb_device_id products[] = {
3984 {
3985 /* LAN7800 USB Gigabit Ethernet Device */
3986 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3987 },
3988 {
3989 /* LAN7850 USB Gigabit Ethernet Device */
3990 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3991 },
02dc1f3d
WH
3992 {
3993 /* LAN7801 USB Gigabit Ethernet Device */
3994 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
3995 },
55d7de9d
WH
3996 {},
3997};
3998MODULE_DEVICE_TABLE(usb, products);
3999
4000static struct usb_driver lan78xx_driver = {
4001 .name = DRIVER_NAME,
4002 .id_table = products,
4003 .probe = lan78xx_probe,
4004 .disconnect = lan78xx_disconnect,
4005 .suspend = lan78xx_suspend,
4006 .resume = lan78xx_resume,
4007 .reset_resume = lan78xx_reset_resume,
4008 .supports_autosuspend = 1,
4009 .disable_hub_initiated_lpm = 1,
4010};
4011
4012module_usb_driver(lan78xx_driver);
4013
4014MODULE_AUTHOR(DRIVER_AUTHOR);
4015MODULE_DESCRIPTION(DRIVER_DESC);
4016MODULE_LICENSE("GPL");