[PATCH] irq-flags: drivers/net: Use the new IRQF_ constants
drivers/net/phy/phy.c
/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* Convenience function to print out the current phy status
 */
void phy_print_status(struct phy_device *phydev)
{
        pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
                        phydev->link ? "Up" : "Down");
        if (phydev->link)
                printk(" - %d/%s", phydev->speed,
                                DUPLEX_FULL == phydev->duplex ?
                                "Full" : "Half");

        printk("\n");
}
EXPORT_SYMBOL(phy_print_status);


/* Convenience functions for reading/writing a given PHY
 * register. They MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation. */
int phy_read(struct phy_device *phydev, u16 regnum)
{
        int retval;
        struct mii_bus *bus = phydev->bus;

        spin_lock_bh(&bus->mdio_lock);
        retval = bus->read(bus, phydev->addr, regnum);
        spin_unlock_bh(&bus->mdio_lock);

        return retval;
}
EXPORT_SYMBOL(phy_read);

int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
        int err;
        struct mii_bus *bus = phydev->bus;

        spin_lock_bh(&bus->mdio_lock);
        err = bus->write(bus, phydev->addr, regnum, val);
        spin_unlock_bh(&bus->mdio_lock);

        return err;
}
EXPORT_SYMBOL(phy_write);

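/* Acknowledge any pending interrupt at the PHY by calling the
 * driver's ack_interrupt hook, if one is provided. */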
int phy_clear_interrupt(struct phy_device *phydev)
{
        int err = 0;

        if (phydev->drv->ack_interrupt)
                err = phydev->drv->ack_interrupt(phydev);

        return err;
}

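/* Record the requested interrupt state in phydev and program it into
 * the PHY through the driver's config_intr hook, if one is provided. */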
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
        int err = 0;

        phydev->interrupts = interrupts;
        if (phydev->drv->config_intr)
                err = phydev->drv->config_intr(phydev);

        return err;
}


/* phy_aneg_done
 *
 * description: Reads the status register and returns 0 either if
 *   auto-negotiation is incomplete, or if there was an error.
 *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
        int retval;

        retval = phy_read(phydev, MII_BMSR);

        return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
        int speed;
        int duplex;
        u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
        {
                .speed = 10000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10000baseT_Full,
        },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_1000baseT_Full,
        },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_1000baseT_Half,
        },
        {
                .speed = SPEED_100,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_100baseT_Full,
        },
        {
                .speed = SPEED_100,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_100baseT_Half,
        },
        {
                .speed = SPEED_10,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10baseT_Full,
        },
        {
                .speed = SPEED_10,
                .duplex = DUPLEX_HALF,
                .setting = SUPPORTED_10baseT_Half,
        },
};

#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))

/* phy_find_setting
 *
 * description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
        int idx = 0;

        while (idx < ARRAY_SIZE(settings) &&
                        (settings[idx].speed != speed ||
                        settings[idx].duplex != duplex))
                idx++;

        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/* phy_find_valid
 * idx: The first index in settings[] to search
 * features: A mask of the valid settings
 *
 * description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
        while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
                idx++;

        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/* phy_sanitize_settings
 *
 * description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
        u32 features = phydev->supported;
        int idx;

        /* Sanitize settings based on PHY capabilities */
        if ((features & SUPPORTED_Autoneg) == 0)
                phydev->autoneg = 0;

        idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
                        features);

        phydev->speed = settings[idx].speed;
        phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);

/* phy_ethtool_sset:
 * A generic ethtool sset function.  Handles all the details
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values
 *
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
        if (cmd->phy_address != phydev->addr)
                return -EINVAL;

        /* We make sure that we don't pass unsupported
         * values in to the PHY */
        cmd->advertising &= phydev->supported;

        /* Verify the settings we care about. */
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE
                        && ((cmd->speed != SPEED_1000
                                        && cmd->speed != SPEED_100
                                        && cmd->speed != SPEED_10)
                                || (cmd->duplex != DUPLEX_HALF
                                        && cmd->duplex != DUPLEX_FULL)))
                return -EINVAL;

        phydev->autoneg = cmd->autoneg;

        phydev->speed = cmd->speed;

        phydev->advertising = cmd->advertising;

        if (AUTONEG_ENABLE == cmd->autoneg)
                phydev->advertising |= ADVERTISED_Autoneg;
        else
                phydev->advertising &= ~ADVERTISED_Autoneg;

        phydev->duplex = cmd->duplex;

        /* Restart the PHY */
        phy_start_aneg(phydev);

        return 0;
}

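/* phy_ethtool_gset: report the PHY's current settings (supported and
 * advertised masks, speed, duplex, etc.) back through ethtool_cmd. */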
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
        cmd->supported = phydev->supported;

        cmd->advertising = phydev->advertising;

        cmd->speed = phydev->speed;
        cmd->duplex = phydev->duplex;
        cmd->port = PORT_MII;
        cmd->phy_address = phydev->addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;

        return 0;
}


/* Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at your own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
                struct mii_ioctl_data *mii_data, int cmd)
{
        u16 val = mii_data->val_in;

        switch (cmd) {
        case SIOCGMIIPHY:
                mii_data->phy_id = phydev->addr;
                break;
        case SIOCGMIIREG:
                mii_data->val_out = phy_read(phydev, mii_data->reg_num);
                break;

        case SIOCSMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (mii_data->phy_id == phydev->addr) {
                        switch (mii_data->reg_num) {
                        case MII_BMCR:
                                if (val & (BMCR_RESET|BMCR_ANENABLE))
                                        phydev->autoneg = AUTONEG_DISABLE;
                                else
                                        phydev->autoneg = AUTONEG_ENABLE;
                                if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
                                        phydev->duplex = DUPLEX_FULL;
                                else
                                        phydev->duplex = DUPLEX_HALF;
                                break;
                        case MII_ADVERTISE:
                                phydev->advertising = val;
                                break;
                        default:
                                /* do nothing */
                                break;
                        }
                }

                phy_write(phydev, mii_data->reg_num, val);

                if (mii_data->reg_num == MII_BMCR
                                && val & BMCR_RESET
                                && phydev->drv->config_init)
                        phydev->drv->config_init(phydev);
                break;
        }

        return 0;
}

/* phy_start_aneg
 *
 * description: Sanitizes the settings (if we're not
 *   autonegotiating them), and then calls the driver's
 *   config_aneg function.  If the PHYCONTROL Layer is operating,
 *   we change the state to reflect the beginning of
 *   Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
        int err;

        spin_lock(&phydev->lock);

        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);

        err = phydev->drv->config_aneg(phydev);

        if (err < 0)
                goto out_unlock;

        if (phydev->state != PHY_HALTED) {
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        phydev->state = PHY_AN;
                        phydev->link_timeout = PHY_AN_TIMEOUT;
                } else {
                        phydev->state = PHY_FORCING;
                        phydev->link_timeout = PHY_FORCE_TIMEOUT;
                }
        }

out_unlock:
        spin_unlock(&phydev->lock);
        return err;
}
EXPORT_SYMBOL(phy_start_aneg);


static void phy_change(void *data);
static void phy_timer(unsigned long data);

/* phy_start_machine:
 *
 * description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state
 *   changes, pass in the callback, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function. */
void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *))
{
        phydev->adjust_state = handler;

        init_timer(&phydev->phy_timer);
        phydev->phy_timer.function = &phy_timer;
        phydev->phy_timer.data = (unsigned long) phydev;
        mod_timer(&phydev->phy_timer, jiffies + HZ);
}

/* phy_stop_machine
 *
 * description: Stops the state machine timer, sets the state to
 *   UP (unless it wasn't up yet), and then frees the interrupt,
 *   if it is in use.  This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
        del_timer_sync(&phydev->phy_timer);

        spin_lock(&phydev->lock);
        if (phydev->state > PHY_UP)
                phydev->state = PHY_UP;
        spin_unlock(&phydev->lock);

        if (phydev->irq != PHY_POLL)
                phy_stop_interrupts(phydev);

        phydev->adjust_state = NULL;
}

/* phy_force_reduction
 *
 * description: Reduces the speed/duplex settings by one notch,
 *   in this order: 1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
 *   10/FULL, 10/HALF.  The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
        int idx;

        idx = phy_find_setting(phydev->speed, phydev->duplex);

        idx++;

        idx = phy_find_valid(idx, phydev->supported);

        phydev->speed = settings[idx].speed;
        phydev->duplex = settings[idx].duplex;

        pr_info("Trying %d/%s\n", phydev->speed,
                        DUPLEX_FULL == phydev->duplex ?
                        "FULL" : "HALF");
}


/* phy_error:
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
void phy_error(struct phy_device *phydev)
{
        spin_lock(&phydev->lock);
        phydev->state = PHY_HALTED;
        spin_unlock(&phydev->lock);
}

/* phy_interrupt
 *
 * description: When a PHY interrupt occurs, the handler disables
 *   interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat, struct pt_regs *regs)
{
        struct phy_device *phydev = phy_dat;

        /* The MDIO bus is not allowed to be written in interrupt
         * context, so we need to disable the irq here.  A work
         * queue will write the PHY to disable and clear the
         * interrupt, and then reenable the irq line. */
        disable_irq_nosync(irq);

        schedule_work(&phydev->phy_queue);

        return IRQ_HANDLED;
}

/* Enable the interrupts from the PHY side */
int phy_enable_interrupts(struct phy_device *phydev)
{
        int err;

        err = phy_clear_interrupt(phydev);

        if (err < 0)
                return err;

        err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

        return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/* Disable the PHY interrupts from the PHY side */
int phy_disable_interrupts(struct phy_device *phydev)
{
        int err;

        /* Disable PHY interrupts */
        err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

        if (err)
                goto phy_err;

        /* Clear the interrupt */
        err = phy_clear_interrupt(phydev);

        if (err)
                goto phy_err;

        return 0;

phy_err:
        phy_error(phydev);

        return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/* phy_start_interrupts
 *
 * description: Request the interrupt for the given PHY.  If
 *   this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   Returns 0 on success.
 *   This should only be called with a valid IRQ number.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
        int err = 0;

        INIT_WORK(&phydev->phy_queue, phy_change, phydev);

        if (request_irq(phydev->irq, phy_interrupt,
                                IRQF_SHARED,
                                "phy_interrupt",
                                phydev) < 0) {
                printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
                                phydev->bus->name,
                                phydev->irq);
                phydev->irq = PHY_POLL;
                return 0;
        }

        err = phy_enable_interrupts(phydev);

        return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

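/* Disable interrupt generation at the PHY and release the IRQ line
 * that phy_start_interrupts() requested. */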
int phy_stop_interrupts(struct phy_device *phydev)
{
        int err;

        err = phy_disable_interrupts(phydev);

        if (err)
                phy_error(phydev);

        free_irq(phydev->irq, phydev);

        return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);


/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void phy_change(void *data)
{
        int err;
        struct phy_device *phydev = data;

        err = phy_disable_interrupts(phydev);

        if (err)
                goto phy_err;

        spin_lock(&phydev->lock);
        if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
                phydev->state = PHY_CHANGELINK;
        spin_unlock(&phydev->lock);

        enable_irq(phydev->irq);

        /* Reenable interrupts */
        err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

        if (err)
                goto irq_enable_err;

        return;

irq_enable_err:
        disable_irq(phydev->irq);
phy_err:
        phy_error(phydev);
}

/* Bring down the PHY link, and stop checking the status. */
void phy_stop(struct phy_device *phydev)
{
        spin_lock(&phydev->lock);

        if (PHY_HALTED == phydev->state)
                goto out_unlock;

        if (phydev->irq != PHY_POLL) {
                /* Clear any pending interrupts */
                phy_clear_interrupt(phydev);

                /* Disable PHY Interrupts */
                phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
        }

        phydev->state = PHY_HALTED;

out_unlock:
        spin_unlock(&phydev->lock);
}


/* phy_start
 *
 * description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
        spin_lock(&phydev->lock);

        switch (phydev->state) {
        case PHY_STARTING:
                phydev->state = PHY_PENDING;
                break;
        case PHY_READY:
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
                phydev->state = PHY_RESUMING;
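                /* fall through */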
        default:
                break;
        }
        spin_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);

/* PHY timer which handles the state machine */
static void phy_timer(unsigned long data)
{
        struct phy_device *phydev = (struct phy_device *)data;
        int needs_aneg = 0;
        int err = 0;

        spin_lock(&phydev->lock);

        if (phydev->adjust_state)
                phydev->adjust_state(phydev->attached_dev);

        switch (phydev->state) {
        case PHY_DOWN:
        case PHY_STARTING:
        case PHY_READY:
        case PHY_PENDING:
                break;
        case PHY_UP:
                needs_aneg = 1;

                phydev->link_timeout = PHY_AN_TIMEOUT;

                break;
        case PHY_AN:
                /* Check if negotiation is done.  Break
                 * if there's an error */
                err = phy_aneg_done(phydev);
                if (err < 0)
                        break;

                /* If auto-negotiation is done, we change to
                 * either RUNNING, or NOLINK */
                if (err > 0) {
                        err = phy_read_status(phydev);

                        if (err)
                                break;

                        if (phydev->link) {
                                phydev->state = PHY_RUNNING;
                                netif_carrier_on(phydev->attached_dev);
                        } else {
                                phydev->state = PHY_NOLINK;
                                netif_carrier_off(phydev->attached_dev);
                        }

                        phydev->adjust_link(phydev->attached_dev);

                } else if (0 == phydev->link_timeout--) {
                        /* The counter expired, so either we
                         * switch to forced mode, or the
                         * magic_aneg bit exists, and we try aneg
                         * again */
                        if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
                                int idx;

                                /* We'll start from the
                                 * fastest speed, and work
                                 * our way down */
                                idx = phy_find_valid(0, phydev->supported);

                                phydev->speed = settings[idx].speed;
                                phydev->duplex = settings[idx].duplex;

                                phydev->autoneg = AUTONEG_DISABLE;
                                phydev->state = PHY_FORCING;
                                phydev->link_timeout = PHY_FORCE_TIMEOUT;

                                pr_info("Trying %d/%s\n", phydev->speed,
                                                DUPLEX_FULL == phydev->duplex ?
                                                "FULL" : "HALF");
                        }

                        needs_aneg = 1;
                }
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
                }
                break;
        case PHY_FORCING:
                err = phy_read_status(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--) {
                                phy_force_reduction(phydev);
                                needs_aneg = 1;
                        }
                }

                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
                /* Only register a CHANGE if we are
                 * polling */
                if (PHY_POLL == phydev->irq)
                        phydev->state = PHY_CHANGELINK;
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);

                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        phydev->state = PHY_NOLINK;
                        netif_carrier_off(phydev->attached_dev);
                }

                phydev->adjust_link(phydev->attached_dev);

                if (PHY_POLL != phydev->irq)
                        err = phy_config_interrupt(phydev,
                                        PHY_INTERRUPT_ENABLED);
                break;
        case PHY_HALTED:
                if (phydev->link) {
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
                }
                break;
        case PHY_RESUMING:

                err = phy_clear_interrupt(phydev);

                if (err)
                        break;

                err = phy_config_interrupt(phydev,
                                PHY_INTERRUPT_ENABLED);

                if (err)
                        break;

                if (AUTONEG_ENABLE == phydev->autoneg) {
                        err = phy_aneg_done(phydev);
                        if (err < 0)
                                break;

                        /* err > 0 if AN is done.
                         * Otherwise, it's 0, and we're
                         * still waiting for AN */
                        if (err > 0) {
                                phydev->state = PHY_RUNNING;
                        } else {
                                phydev->state = PHY_AN;
                                phydev->link_timeout = PHY_AN_TIMEOUT;
                        }
                } else
                        phydev->state = PHY_RUNNING;
                break;
        }

        spin_unlock(&phydev->lock);

        if (needs_aneg)
                err = phy_start_aneg(phydev);

        if (err < 0)
                phy_error(phydev);

        mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
