Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author: David S. Miller <davem@davemloft.net>
Mon, 3 May 2010 04:43:40 +0000 (21:43 -0700)
committer: David S. Miller <davem@davemloft.net>
Mon, 3 May 2010 05:02:06 +0000 (22:02 -0700)
17 files changed:
1  2 
drivers/net/bnx2.c
drivers/net/e1000e/netdev.c
drivers/net/gianfar.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/sb1250-mac.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/usb/cdc_ether.c
drivers/net/wireless/p54/p54pci.c
include/net/sctp/sctp.h
include/net/sock.h
net/bluetooth/l2cap.c
net/ipv4/inet_connection_sock.c
net/ipv6/inet6_connection_sock.c
net/sctp/socket.c

diff --combined drivers/net/bnx2.c
index 53326fed6c8151870b9d634af5bcd30b29b716b9,ac90a3828f69fd3f56efd3443a785b15af0ffb1e..ab26bbc2a1d39c7a331990a22c2c4b75f36d4a98
@@@ -58,8 -58,8 +58,8 @@@
  #include "bnx2_fw.h"
  
  #define DRV_MODULE_NAME               "bnx2"
- #define DRV_MODULE_VERSION    "2.0.8"
- #define DRV_MODULE_RELDATE    "Feb 15, 2010"
+ #define DRV_MODULE_VERSION    "2.0.9"
+ #define DRV_MODULE_RELDATE    "April 27, 2010"
  #define FW_MIPS_FILE_06               "bnx2/bnx2-mips-06-5.0.0.j6.fw"
  #define FW_RV2P_FILE_06               "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
  #define FW_MIPS_FILE_09               "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@@ -651,9 -651,10 +651,10 @@@ bnx2_napi_enable(struct bnx2 *bp
  }
  
  static void
- bnx2_netif_stop(struct bnx2 *bp)
+ bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
  {
-       bnx2_cnic_stop(bp);
+       if (stop_cnic)
+               bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;
  
  }
  
  static void
- bnx2_netif_start(struct bnx2 *bp)
+ bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
  {
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
-                       bnx2_cnic_start(bp);
+                       if (start_cnic)
+                               bnx2_cnic_start(bp);
                }
        }
  }
@@@ -2670,7 -2672,7 +2672,7 @@@ bnx2_alloc_rx_page(struct bnx2 *bp, str
        }
  
        rx_pg->page = page;
 -      pci_unmap_addr_set(rx_pg, mapping, mapping);
 +      dma_unmap_addr_set(rx_pg, mapping, mapping);
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
@@@ -2685,7 -2687,7 +2687,7 @@@ bnx2_free_rx_page(struct bnx2 *bp, stru
        if (!page)
                return;
  
 -      pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
 +      pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
                       PCI_DMA_FROMDEVICE);
  
        __free_page(page);
@@@ -2717,7 -2719,7 +2719,7 @@@ bnx2_alloc_rx_skb(struct bnx2 *bp, stru
        }
  
        rx_buf->skb = skb;
 -      pci_unmap_addr_set(rx_buf, mapping, mapping);
 +      dma_unmap_addr_set(rx_buf, mapping, mapping);
  
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@@ -2816,7 -2818,7 +2818,7 @@@ bnx2_tx_int(struct bnx2 *bp, struct bnx
                        }
                }
  
 -              pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 +              pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);
  
                tx_buf->skb = NULL;
                        sw_cons = NEXT_TX_BD(sw_cons);
  
                        pci_unmap_page(bp->pdev,
 -                              pci_unmap_addr(
 +                              dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
@@@ -2908,8 -2910,8 +2910,8 @@@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
 -                      pci_unmap_addr_set(prod_rx_pg, mapping,
 -                              pci_unmap_addr(cons_rx_pg, mapping));
 +                      dma_unmap_addr_set(prod_rx_pg, mapping,
 +                              dma_unmap_addr(cons_rx_pg, mapping));
  
                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@@ -2933,7 -2935,7 +2935,7 @@@ bnx2_reuse_rx_skb(struct bnx2 *bp, stru
        prod_rx_buf = &rxr->rx_buf_ring[prod];
  
        pci_dma_sync_single_for_device(bp->pdev,
 -              pci_unmap_addr(cons_rx_buf, mapping),
 +              dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
  
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
        if (cons == prod)
                return;
  
 -      pci_unmap_addr_set(prod_rx_buf, mapping,
 -                      pci_unmap_addr(cons_rx_buf, mapping));
 +      dma_unmap_addr_set(prod_rx_buf, mapping,
 +                      dma_unmap_addr(cons_rx_buf, mapping));
  
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@@ -3017,7 -3019,7 +3019,7 @@@ bnx2_rx_skb(struct bnx2 *bp, struct bnx
                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
 -                      mapping_old = pci_unmap_addr(rx_pg, mapping);
 +                      mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;
  
@@@ -3098,7 -3100,7 +3100,7 @@@ bnx2_rx_int(struct bnx2 *bp, struct bnx
  
                rx_buf->skb = NULL;
  
 -              dma_addr = pci_unmap_addr(rx_buf, mapping);
 +              dma_addr = dma_unmap_addr(rx_buf, mapping);
  
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
@@@ -3546,6 -3548,7 +3548,6 @@@ bnx2_set_rx_mode(struct net_device *dev
        }
        else {
                /* Accept one or more multicast(s). */
 -              struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
  
                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
  
 -              netdev_for_each_mc_addr(mclist, dev) {
 -                      crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
@@@ -4758,8 -4761,12 +4760,12 @@@ bnx2_reset_chip(struct bnx2 *bp, u32 re
                rc = bnx2_alloc_bad_rbuf(bp);
        }
  
-       if (bp->flags & BNX2_FLAG_USING_MSIX)
+       if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
+               /* Prevent MSIX table reads and write from timing out */
+               REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+                       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+       }
  
        return rc;
  }
@@@ -5311,7 -5318,7 +5317,7 @@@ bnx2_free_tx_skbs(struct bnx2 *bp
                        }
  
                        pci_unmap_single(bp->pdev,
 -                                       pci_unmap_addr(tx_buf, mapping),
 +                                       dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);
  
                        for (k = 0; k < last; k++, j++) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                pci_unmap_page(bp->pdev,
 -                                      pci_unmap_addr(tx_buf, mapping),
 +                                      dma_unmap_addr(tx_buf, mapping),
                                        skb_shinfo(skb)->frags[k].size,
                                        PCI_DMA_TODEVICE);
                        }
@@@ -5352,7 -5359,7 +5358,7 @@@ bnx2_free_rx_skbs(struct bnx2 *bp
                                continue;
  
                        pci_unmap_single(bp->pdev,
 -                                       pci_unmap_addr(rx_buf, mapping),
 +                                       dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
  
@@@ -5762,7 -5769,7 +5768,7 @@@ bnx2_run_loopback(struct bnx2 *bp, int 
        skb_reserve(rx_skb, BNX2_RX_OFFSET);
  
        pci_dma_sync_single_for_cpu(bp->pdev,
 -              pci_unmap_addr(rx_buf, mapping),
 +              dma_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);
  
        if (rx_hdr->l2_fhdr_status &
@@@ -6272,12 -6279,12 +6278,12 @@@ bnx2_reset_task(struct work_struct *wor
                return;
        }
  
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
  
        bnx2_init_nic(bp, 1);
  
        atomic_set(&bp->intr_sem, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        rtnl_unlock();
  }
  
@@@ -6319,7 -6326,7 +6325,7 @@@ bnx2_vlan_rx_register(struct net_devic
        struct bnx2 *bp = netdev_priv(dev);
  
        if (netif_running(dev))
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, false);
  
        bp->vlgrp = vlgrp;
  
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
  
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, false);
  }
  #endif
  
@@@ -6422,7 -6429,7 +6428,7 @@@ bnx2_start_xmit(struct sk_buff *skb, st
  
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
 -      pci_unmap_addr_set(tx_buf, mapping, mapping);
 +      dma_unmap_addr_set(tx_buf, mapping, mapping);
  
        txbd = &txr->tx_desc_ring[ring_prod];
  
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
 -              pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
 +              dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);
  
                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@@ -6484,7 -6491,7 +6490,7 @@@ dma_error
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
 -      pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 +      pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
  
        /* unmap remaining mapped pages */
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
 -              pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 +              pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }
@@@ -7050,9 -7057,9 +7056,9 @@@ bnx2_set_coalesce(struct net_device *de
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
  
        if (netif_running(bp->dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_init_nic(bp, 0);
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
  
        return 0;
@@@ -7082,7 -7089,7 +7088,7 @@@ bnx2_change_ring_size(struct bnx2 *bp, 
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);
  
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
  #endif
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
        return 0;
  }
@@@ -7363,7 -7370,7 +7369,7 @@@ bnx2_self_test(struct net_device *dev, 
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;
  
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);
  
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
-                       bnx2_netif_start(bp);
+                       bnx2_netif_start(bp, true);
                }
  
                /* wait for link up */
@@@ -8376,7 -8383,7 +8382,7 @@@ bnx2_suspend(struct pci_dev *pdev, pm_m
                return 0;
  
        flush_scheduled_work();
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
@@@ -8398,7 -8405,7 +8404,7 @@@ bnx2_resume(struct pci_dev *pdev
        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        return 0;
  }
  
@@@ -8425,7 -8432,7 +8431,7 @@@ static pci_ers_result_t bnx2_io_error_d
        }
  
        if (netif_running(dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }
@@@ -8482,7 -8489,7 +8488,7 @@@ static void bnx2_io_resume(struct pci_d
  
        rtnl_lock();
        if (netif_running(dev))
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
  
        netif_device_attach(dev);
        rtnl_unlock();
index 904bd6bf319928a9f520866d827c86f3e52aeab2,dbf81788bb406f29e7bf9f5c3c3f50ab22016c16..d13760dc27f8e09a5bb324cb70fa1c126f8c27cc
@@@ -26,8 -26,6 +26,8 @@@
  
  *******************************************************************************/
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/module.h>
  #include <linux/types.h>
  #include <linux/init.h>
@@@ -47,7 -45,6 +47,7 @@@
  #include <linux/cpu.h>
  #include <linux/smp.h>
  #include <linux/pm_qos_params.h>
 +#include <linux/pm_runtime.h>
  #include <linux/aer.h>
  
  #include "e1000.h"
@@@ -69,361 -66,6 +69,361 @@@ static const struct e1000_info *e1000_i
        [board_pchlan]          = &e1000_pch_info,
  };
  
 +struct e1000_reg_info {
 +      u32 ofs;
 +      char *name;
 +};
 +
 +#define E1000_RDFH    0x02410 /* Rx Data FIFO Head - RW */
 +#define E1000_RDFT    0x02418 /* Rx Data FIFO Tail - RW */
 +#define E1000_RDFHS   0x02420 /* Rx Data FIFO Head Saved - RW */
 +#define E1000_RDFTS   0x02428 /* Rx Data FIFO Tail Saved - RW */
 +#define E1000_RDFPC   0x02430 /* Rx Data FIFO Packet Count - RW */
 +
 +#define E1000_TDFH    0x03410 /* Tx Data FIFO Head - RW */
 +#define E1000_TDFT    0x03418 /* Tx Data FIFO Tail - RW */
 +#define E1000_TDFHS   0x03420 /* Tx Data FIFO Head Saved - RW */
 +#define E1000_TDFTS   0x03428 /* Tx Data FIFO Tail Saved - RW */
 +#define E1000_TDFPC   0x03430 /* Tx Data FIFO Packet Count - RW */
 +
 +static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 +
 +      /* General Registers */
 +      {E1000_CTRL, "CTRL"},
 +      {E1000_STATUS, "STATUS"},
 +      {E1000_CTRL_EXT, "CTRL_EXT"},
 +
 +      /* Interrupt Registers */
 +      {E1000_ICR, "ICR"},
 +
 +      /* RX Registers */
 +      {E1000_RCTL, "RCTL"},
 +      {E1000_RDLEN, "RDLEN"},
 +      {E1000_RDH, "RDH"},
 +      {E1000_RDT, "RDT"},
 +      {E1000_RDTR, "RDTR"},
 +      {E1000_RXDCTL(0), "RXDCTL"},
 +      {E1000_ERT, "ERT"},
 +      {E1000_RDBAL, "RDBAL"},
 +      {E1000_RDBAH, "RDBAH"},
 +      {E1000_RDFH, "RDFH"},
 +      {E1000_RDFT, "RDFT"},
 +      {E1000_RDFHS, "RDFHS"},
 +      {E1000_RDFTS, "RDFTS"},
 +      {E1000_RDFPC, "RDFPC"},
 +
 +      /* TX Registers */
 +      {E1000_TCTL, "TCTL"},
 +      {E1000_TDBAL, "TDBAL"},
 +      {E1000_TDBAH, "TDBAH"},
 +      {E1000_TDLEN, "TDLEN"},
 +      {E1000_TDH, "TDH"},
 +      {E1000_TDT, "TDT"},
 +      {E1000_TIDV, "TIDV"},
 +      {E1000_TXDCTL(0), "TXDCTL"},
 +      {E1000_TADV, "TADV"},
 +      {E1000_TARC(0), "TARC"},
 +      {E1000_TDFH, "TDFH"},
 +      {E1000_TDFT, "TDFT"},
 +      {E1000_TDFHS, "TDFHS"},
 +      {E1000_TDFTS, "TDFTS"},
 +      {E1000_TDFPC, "TDFPC"},
 +
 +      /* List Terminator */
 +      {}
 +};
 +
 +/*
 + * e1000_regdump - register printout routine
 + */
 +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 +{
 +      int n = 0;
 +      char rname[16];
 +      u32 regs[8];
 +
 +      switch (reginfo->ofs) {
 +      case E1000_RXDCTL(0):
 +              for (n = 0; n < 2; n++)
 +                      regs[n] = __er32(hw, E1000_RXDCTL(n));
 +              break;
 +      case E1000_TXDCTL(0):
 +              for (n = 0; n < 2; n++)
 +                      regs[n] = __er32(hw, E1000_TXDCTL(n));
 +              break;
 +      case E1000_TARC(0):
 +              for (n = 0; n < 2; n++)
 +                      regs[n] = __er32(hw, E1000_TARC(n));
 +              break;
 +      default:
 +              printk(KERN_INFO "%-15s %08x\n",
 +                      reginfo->name, __er32(hw, reginfo->ofs));
 +              return;
 +      }
 +
 +      snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
 +      printk(KERN_INFO "%-15s ", rname);
 +      for (n = 0; n < 2; n++)
 +              printk(KERN_CONT "%08x ", regs[n]);
 +      printk(KERN_CONT "\n");
 +}
 +
 +
 +/*
 + * e1000e_dump - Print registers, tx-ring and rx-ring
 + */
 +static void e1000e_dump(struct e1000_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct e1000_hw *hw = &adapter->hw;
 +      struct e1000_reg_info *reginfo;
 +      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_tx_desc *tx_desc;
 +      struct my_u0 { u64 a; u64 b; } *u0;
 +      struct e1000_buffer *buffer_info;
 +      struct e1000_ring *rx_ring = adapter->rx_ring;
 +      union e1000_rx_desc_packet_split *rx_desc_ps;
 +      struct e1000_rx_desc *rx_desc;
 +      struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
 +      u32 staterr;
 +      int i = 0;
 +
 +      if (!netif_msg_hw(adapter))
 +              return;
 +
 +      /* Print netdevice Info */
 +      if (netdev) {
 +              dev_info(&adapter->pdev->dev, "Net device Info\n");
 +              printk(KERN_INFO "Device Name     state            "
 +                      "trans_start      last_rx\n");
 +              printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
 +                      netdev->name,
 +                      netdev->state,
 +                      netdev->trans_start,
 +                      netdev->last_rx);
 +      }
 +
 +      /* Print Registers */
 +      dev_info(&adapter->pdev->dev, "Register Dump\n");
 +      printk(KERN_INFO " Register Name   Value\n");
 +      for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
 +           reginfo->name; reginfo++) {
 +              e1000_regdump(hw, reginfo);
 +      }
 +
 +      /* Print TX Ring Summary */
 +      if (!netdev || !netif_running(netdev))
 +              goto exit;
 +
 +      dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
 +      printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
 +              " leng ntw timestamp\n");
 +      buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 +      printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
 +              0, tx_ring->next_to_use, tx_ring->next_to_clean,
 +              (u64)buffer_info->dma,
 +              buffer_info->length,
 +              buffer_info->next_to_watch,
 +              (u64)buffer_info->time_stamp);
 +
 +      /* Print TX Rings */
 +      if (!netif_msg_tx_done(adapter))
 +              goto rx_ring_summary;
 +
 +      dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
 +
 +      /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 +       *
 +       * Legacy Transmit Descriptor
 +       *   +--------------------------------------------------------------+
 +       * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
 +       *   +--------------------------------------------------------------+
 +       * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
 +       *   +--------------------------------------------------------------+
 +       *   63       48 47        36 35    32 31     24 23    16 15        0
 +       *
 +       * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
 +       *   63      48 47    40 39       32 31             16 15    8 7      0
 +       *   +----------------------------------------------------------------+
 +       * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
 +       *   +----------------------------------------------------------------+
 +       * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
 +       *   +----------------------------------------------------------------+
 +       *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
 +       *
 +       * Extended Data Descriptor (DTYP=0x1)
 +       *   +----------------------------------------------------------------+
 +       * 0 |                     Buffer Address [63:0]                      |
 +       *   +----------------------------------------------------------------+
 +       * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
 +       *   +----------------------------------------------------------------+
 +       *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 +       */
 +      printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
 +              " [bi->dma       ] leng  ntw timestamp        bi->skb "
 +              "<-- Legacy format\n");
 +      printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
 +              " [bi->dma       ] leng  ntw timestamp        bi->skb "
 +              "<-- Ext Context format\n");
 +      printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
 +              " [bi->dma       ] leng  ntw timestamp        bi->skb "
 +              "<-- Ext Data format\n");
 +      for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 +              tx_desc = E1000_TX_DESC(*tx_ring, i);
 +              buffer_info = &tx_ring->buffer_info[i];
 +              u0 = (struct my_u0 *)tx_desc;
 +              printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
 +                      "%04X  %3X %016llX %p",
 +                     (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
 +                      ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
 +                     le64_to_cpu(u0->a), le64_to_cpu(u0->b),
 +                     (u64)buffer_info->dma, buffer_info->length,
 +                     buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
 +                     buffer_info->skb);
 +              if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
 +                      printk(KERN_CONT " NTC/U\n");
 +              else if (i == tx_ring->next_to_use)
 +                      printk(KERN_CONT " NTU\n");
 +              else if (i == tx_ring->next_to_clean)
 +                      printk(KERN_CONT " NTC\n");
 +              else
 +                      printk(KERN_CONT "\n");
 +
 +              if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 +                      print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
 +                                      16, 1, phys_to_virt(buffer_info->dma),
 +                                      buffer_info->length, true);
 +      }
 +
 +      /* Print RX Rings Summary */
 +rx_ring_summary:
 +      dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
 +      printk(KERN_INFO "Queue [NTU] [NTC]\n");
 +      printk(KERN_INFO " %5d %5X %5X\n", 0,
 +              rx_ring->next_to_use, rx_ring->next_to_clean);
 +
 +      /* Print RX Rings */
 +      if (!netif_msg_rx_status(adapter))
 +              goto exit;
 +
 +      dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
 +      switch (adapter->rx_ps_pages) {
 +      case 1:
 +      case 2:
 +      case 3:
 +              /* [Extended] Packet Split Receive Descriptor Format
 +               *
 +               *    +-----------------------------------------------------+
 +               *  0 |                Buffer Address 0 [63:0]              |
 +               *    +-----------------------------------------------------+
 +               *  8 |                Buffer Address 1 [63:0]              |
 +               *    +-----------------------------------------------------+
 +               * 16 |                Buffer Address 2 [63:0]              |
 +               *    +-----------------------------------------------------+
 +               * 24 |                Buffer Address 3 [63:0]              |
 +               *    +-----------------------------------------------------+
 +               */
 +              printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
 +                      "[buffer 1 63:0 ] "
 +                     "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
 +                     "[bi->skb] <-- Ext Pkt Split format\n");
 +              /* [Extended] Receive Descriptor (Write-Back) Format
 +               *
 +               *   63       48 47    32 31     13 12    8 7    4 3        0
 +               *   +------------------------------------------------------+
 +               * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
 +               *   | Checksum | Ident  |         | Queue |      |  Type   |
 +               *   +------------------------------------------------------+
 +               * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 +               *   +------------------------------------------------------+
 +               *   63       48 47    32 31            20 19               0
 +               */
 +              printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
 +                      "[vl   l0 ee  es] "
 +                     "[ l3  l2  l1 hs] [reserved      ] ---------------- "
 +                     "[bi->skb] <-- Ext Rx Write-Back format\n");
 +              for (i = 0; i < rx_ring->count; i++) {
 +                      buffer_info = &rx_ring->buffer_info[i];
 +                      rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 +                      u1 = (struct my_u1 *)rx_desc_ps;
 +                      staterr =
 +                              le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 +                      if (staterr & E1000_RXD_STAT_DD) {
 +                              /* Descriptor Done */
 +                              printk(KERN_INFO "RWB[0x%03X]     %016llX "
 +                                      "%016llX %016llX %016llX "
 +                                      "---------------- %p", i,
 +                                      le64_to_cpu(u1->a),
 +                                      le64_to_cpu(u1->b),
 +                                      le64_to_cpu(u1->c),
 +                                      le64_to_cpu(u1->d),
 +                                      buffer_info->skb);
 +                      } else {
 +                              printk(KERN_INFO "R  [0x%03X]     %016llX "
 +                                      "%016llX %016llX %016llX %016llX %p", i,
 +                                      le64_to_cpu(u1->a),
 +                                      le64_to_cpu(u1->b),
 +                                      le64_to_cpu(u1->c),
 +                                      le64_to_cpu(u1->d),
 +                                      (u64)buffer_info->dma,
 +                                      buffer_info->skb);
 +
 +                              if (netif_msg_pktdata(adapter))
 +                                      print_hex_dump(KERN_INFO, "",
 +                                              DUMP_PREFIX_ADDRESS, 16, 1,
 +                                              phys_to_virt(buffer_info->dma),
 +                                              adapter->rx_ps_bsize0, true);
 +                      }
 +
 +                      if (i == rx_ring->next_to_use)
 +                              printk(KERN_CONT " NTU\n");
 +                      else if (i == rx_ring->next_to_clean)
 +                              printk(KERN_CONT " NTC\n");
 +                      else
 +                              printk(KERN_CONT "\n");
 +              }
 +              break;
 +      default:
 +      case 0:
 +              /* Legacy Receive Descriptor Format
 +               *
 +               * +-----------------------------------------------------+
 +               * |                Buffer Address [63:0]                |
 +               * +-----------------------------------------------------+
 +               * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
 +               * +-----------------------------------------------------+
 +               * 63       48 47    40 39      32 31         16 15      0
 +               */
 +              printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
 +                      "[vl er S cks ln] [bi->dma       ] [bi->skb] "
 +                      "<-- Legacy format\n");
 +              for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 +                      rx_desc = E1000_RX_DESC(*rx_ring, i);
 +                      buffer_info = &rx_ring->buffer_info[i];
 +                      u0 = (struct my_u0 *)rx_desc;
 +                      printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
 +                              "%016llX %p",
 +                              i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
 +                              (u64)buffer_info->dma, buffer_info->skb);
 +                      if (i == rx_ring->next_to_use)
 +                              printk(KERN_CONT " NTU\n");
 +                      else if (i == rx_ring->next_to_clean)
 +                              printk(KERN_CONT " NTC\n");
 +                      else
 +                              printk(KERN_CONT "\n");
 +
 +                      if (netif_msg_pktdata(adapter))
 +                              print_hex_dump(KERN_INFO, "",
 +                                      DUMP_PREFIX_ADDRESS,
 +                                      16, 1, phys_to_virt(buffer_info->dma),
 +                                      adapter->rx_buffer_len, true);
 +              }
 +      }
 +
 +exit:
 +      return;
 +}
 +
  /**
   * e1000_desc_unused - calculate if we have unused descriptors
   **/
@@@ -536,10 -178,10 +536,10 @@@ static void e1000_alloc_rx_buffers(stru
  
                buffer_info->skb = skb;
  map_skb:
 -              buffer_info->dma = pci_map_single(pdev, skb->data,
 +              buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_buffer_len,
 -                                                PCI_DMA_FROMDEVICE);
 -              if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
 +                                                DMA_FROM_DEVICE);
 +              if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@@ -605,12 -247,11 +605,12 @@@ static void e1000_alloc_rx_buffers_ps(s
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
 -                              ps_page->dma = pci_map_page(pdev,
 -                                                 ps_page->page,
 -                                                 0, PAGE_SIZE,
 -                                                 PCI_DMA_FROMDEVICE);
 -                              if (pci_dma_mapping_error(pdev, ps_page->dma)) {
 +                              ps_page->dma = dma_map_page(&pdev->dev,
 +                                                          ps_page->page,
 +                                                          0, PAGE_SIZE,
 +                                                          DMA_FROM_DEVICE);
 +                              if (dma_mapping_error(&pdev->dev,
 +                                                    ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
                }
  
                buffer_info->skb = skb;
 -              buffer_info->dma = pci_map_single(pdev, skb->data,
 +              buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_ps_bsize0,
 -                                                PCI_DMA_FROMDEVICE);
 -              if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
 +                                                DMA_FROM_DEVICE);
 +              if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@@ -725,10 -366,10 +725,10 @@@ check_page
                }
  
                if (!buffer_info->dma)
 -                      buffer_info->dma = pci_map_page(pdev,
 +                      buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
 -                                                      PCI_DMA_FROMDEVICE);
 +                                                      DMA_FROM_DEVICE);
  
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@@ -802,10 -443,10 +802,10 @@@ static bool e1000_clean_rx_irq(struct e
  
                cleaned = 1;
                cleaned_count++;
 -              pci_unmap_single(pdev,
 +              dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
                                 adapter->rx_buffer_len,
 -                               PCI_DMA_FROMDEVICE);
 +                               DMA_FROM_DEVICE);
                buffer_info->dma = 0;
  
                length = le16_to_cpu(rx_desc->length);
@@@ -906,11 -547,12 +906,11 @@@ static void e1000_put_txbuf(struct e100
  {
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
 -                      pci_unmap_page(adapter->pdev, buffer_info->dma,
 -                                     buffer_info->length, PCI_DMA_TODEVICE);
 +                      dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
 +                                     buffer_info->length, DMA_TO_DEVICE);
                else
 -                      pci_unmap_single(adapter->pdev, buffer_info->dma,
 -                                       buffer_info->length,
 -                                       PCI_DMA_TODEVICE);
 +                      dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 +                                       buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
@@@ -1111,9 -753,9 +1111,9 @@@ static bool e1000_clean_rx_irq_ps(struc
  
                cleaned = 1;
                cleaned_count++;
 -              pci_unmap_single(pdev, buffer_info->dma,
 +              dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0,
 -                               PCI_DMA_FROMDEVICE);
 +                               DMA_FROM_DEVICE);
                buffer_info->dma = 0;
  
                /* see !EOP comment in other rx routine */
                         * kmap_atomic, so we can't hold the mapping
                         * very long
                         */
 -                      pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
 -                              PAGE_SIZE, PCI_DMA_FROMDEVICE);
 +                      dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
 +                                              PAGE_SIZE, DMA_FROM_DEVICE);
                        vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
                        memcpy(skb_tail_pointer(skb), vaddr, l1);
                        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
 -                      pci_dma_sync_single_for_device(pdev, ps_page->dma,
 -                              PAGE_SIZE, PCI_DMA_FROMDEVICE);
 +                      dma_sync_single_for_device(&pdev->dev, ps_page->dma,
 +                                                 PAGE_SIZE, DMA_FROM_DEVICE);
  
                        /* remove the CRC */
                        if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
                                break;
  
                        ps_page = &buffer_info->ps_pages[j];
 -                      pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 -                                     PCI_DMA_FROMDEVICE);
 +                      dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
 +                                     DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        skb_fill_page_desc(skb, j, ps_page->page, 0, length);
                        ps_page->page = NULL;
@@@ -1311,8 -953,8 +1311,8 @@@ static bool e1000_clean_jumbo_rx_irq(st
  
                cleaned = true;
                cleaned_count++;
 -              pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
 -                             PCI_DMA_FROMDEVICE);
 +              dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
 +                             DMA_FROM_DEVICE);
                buffer_info->dma = 0;
  
                length = le16_to_cpu(rx_desc->length);
@@@ -1448,17 -1090,17 +1448,17 @@@ static void e1000_clean_rx_ring(struct 
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        if (adapter->clean_rx == e1000_clean_rx_irq)
 -                              pci_unmap_single(pdev, buffer_info->dma,
 +                              dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
 -                                               PCI_DMA_FROMDEVICE);
 +                                               DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
 -                              pci_unmap_page(pdev, buffer_info->dma,
 +                              dma_unmap_page(&pdev->dev, buffer_info->dma,
                                               PAGE_SIZE,
 -                                             PCI_DMA_FROMDEVICE);
 +                                             DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
 -                              pci_unmap_single(pdev, buffer_info->dma,
 +                              dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_ps_bsize0,
 -                                               PCI_DMA_FROMDEVICE);
 +                                               DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                }
  
                        ps_page = &buffer_info->ps_pages[j];
                        if (!ps_page->page)
                                break;
 -                      pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 -                                     PCI_DMA_FROMDEVICE);
 +                      dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
 +                                     DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        put_page(ps_page->page);
                        ps_page->page = NULL;
@@@ -2923,7 -2565,7 +2923,7 @@@ static void e1000_set_multi(struct net_
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
        int i;
  
                /* prepare a packed array of only addresses. */
                i = 0;
 -              netdev_for_each_mc_addr(mc_ptr, netdev)
 -                      memcpy(mta_list + (i++ * ETH_ALEN),
 -                             mc_ptr->dmi_addr, ETH_ALEN);
 +              netdev_for_each_mc_addr(ha, netdev)
 +                      memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  
                e1000_update_mc_addr_list(hw, mta_list, i);
                kfree(mta_list);
@@@ -3440,15 -3083,12 +3440,15 @@@ static int e1000_open(struct net_devic
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 +      struct pci_dev *pdev = adapter->pdev;
        int err;
  
        /* disallow open during test */
        if (test_bit(__E1000_TESTING, &adapter->state))
                return -EBUSY;
  
 +      pm_runtime_get_sync(&pdev->dev);
 +
        netif_carrier_off(netdev);
  
        /* allocate transmit descriptors */
  
        netif_start_queue(netdev);
  
 +      adapter->idle_check = true;
 +      pm_runtime_put(&pdev->dev);
 +
        /* fire a link status change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
  
@@@ -3525,7 -3162,6 +3525,7 @@@ err_setup_rx
        e1000e_free_tx_resources(adapter);
  err_setup_tx:
        e1000e_reset(adapter);
 +      pm_runtime_put_sync(&pdev->dev);
  
        return err;
  }
  static int e1000_close(struct net_device *netdev)
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct pci_dev *pdev = adapter->pdev;
  
        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 -      e1000e_down(adapter);
 +
 +      pm_runtime_get_sync(&pdev->dev);
 +
 +      if (!test_bit(__E1000_DOWN, &adapter->state)) {
 +              e1000e_down(adapter);
 +              e1000_free_irq(adapter);
 +      }
        e1000_power_down_phy(adapter);
 -      e1000_free_irq(adapter);
  
        e1000e_free_tx_resources(adapter);
        e1000e_free_rx_resources(adapter);
        if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
  
 +      pm_runtime_put_sync(&pdev->dev);
 +
        return 0;
  }
  /**
@@@ -3922,9 -3550,6 +3922,9 @@@ static void e1000_watchdog_task(struct 
  
        link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
 +              /* Cancel scheduled suspend requests. */
 +              pm_runtime_resume(netdev->dev.parent);
 +
                e1000e_enable_receives(adapter);
                goto link_up;
        }
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
 +
 +                      /* Cancel scheduled suspend requests. */
 +                      pm_runtime_resume(netdev->dev.parent);
 +
                        /* update snapshot of PHY registers on LSC */
                        e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
  
                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                schedule_work(&adapter->reset_task);
 +                      else
 +                              pm_schedule_suspend(netdev->dev.parent,
 +                                                      LINK_TIMEOUT);
                }
        }
  
@@@ -4272,11 -3890,10 +4272,11 @@@ static int e1000_tx_map(struct e1000_ad
                buffer_info->length = size;
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
 -              buffer_info->dma = pci_map_single(pdev, skb->data + offset,
 -                                                size, PCI_DMA_TODEVICE);
 +              buffer_info->dma = dma_map_single(&pdev->dev,
 +                                                skb->data + offset,
 +                                                size, DMA_TO_DEVICE);
                buffer_info->mapped_as_page = false;
 -              if (pci_dma_mapping_error(pdev, buffer_info->dma))
 +              if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
  
                len -= size;
                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->next_to_watch = i;
 -                      buffer_info->dma = pci_map_page(pdev, frag->page,
 +                      buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
                                                        offset, size,
 -                                                      PCI_DMA_TODEVICE);
 +                                                      DMA_TO_DEVICE);
                        buffer_info->mapped_as_page = true;
 -                      if (pci_dma_mapping_error(pdev, buffer_info->dma))
 +                      if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                                goto dma_error;
  
                        len -= size;
@@@ -4488,7 -4105,7 +4488,7 @@@ static netdev_tx_t e1000_xmit_frame(str
        unsigned int max_per_txd = E1000_MAX_PER_TXD;
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
 -      unsigned int len = skb->len - skb->data_len;
 +      unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
        unsigned int mss;
        int count = 0;
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
 -                      len = skb->len - skb->data_len;
 +                      len = skb_headlen(skb);
                }
        }
  
@@@ -4624,8 -4241,6 +4624,8 @@@ static void e1000_reset_task(struct wor
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
  
 +      e1000e_dump(adapter);
 +      e_err("Reset adapter\n");
        e1000e_reinit_locked(adapter);
  }
  
        return retval;
  }
  
 -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 +                          bool runtime)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
 -      u32 wufc = adapter->wol;
 +      /* Runtime suspend should only enable wakeup for link changes */
 +      u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
        int retval = 0;
  
        netif_device_detach(netdev);
@@@ -5020,6 -4633,9 +5020,9 @@@ static void __e1000e_disable_aspm(struc
        reg16 &= ~state;
        pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
  
+       if (!pdev->bus->self)
+               return;
        pos = pci_pcie_cap(pdev->bus->self);
        pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
        reg16 &= ~state;
@@@ -5035,13 -4651,20 +5038,13 @@@ void e1000e_disable_aspm(struct pci_de
        __e1000e_disable_aspm(pdev, state);
  }
  
 -#ifdef CONFIG_PM
 -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 +#ifdef CONFIG_PM_OPS
 +static bool e1000e_pm_ready(struct e1000_adapter *adapter)
  {
 -      int retval;
 -      bool wake;
 -
 -      retval = __e1000_shutdown(pdev, &wake);
 -      if (!retval)
 -              e1000_complete_shutdown(pdev, true, wake);
 -
 -      return retval;
 +      return !!adapter->tx_ring->buffer_info;
  }
  
 -static int e1000_resume(struct pci_dev *pdev)
 +static int __e1000_resume(struct pci_dev *pdev)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
                e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
  
 -      err = pci_enable_device_mem(pdev);
 -      if (err) {
 -              dev_err(&pdev->dev,
 -                      "Cannot enable PCI device from suspend\n");
 -              return err;
 -      }
 -
 -      pci_set_master(pdev);
 -
 -      pci_enable_wake(pdev, PCI_D3hot, 0);
 -      pci_enable_wake(pdev, PCI_D3cold, 0);
 -
        e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
                err = e1000_request_irq(adapter);
  
        return 0;
  }
 -#endif
 +
 +#ifdef CONFIG_PM_SLEEP
 +static int e1000_suspend(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      int retval;
 +      bool wake;
 +
 +      retval = __e1000_shutdown(pdev, &wake, false);
 +      if (!retval)
 +              e1000_complete_shutdown(pdev, true, wake);
 +
 +      return retval;
 +}
 +
 +static int e1000_resume(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (e1000e_pm_ready(adapter))
 +              adapter->idle_check = true;
 +
 +      return __e1000_resume(pdev);
 +}
 +#endif /* CONFIG_PM_SLEEP */
 +
 +#ifdef CONFIG_PM_RUNTIME
 +static int e1000_runtime_suspend(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (e1000e_pm_ready(adapter)) {
 +              bool wake;
 +
 +              __e1000_shutdown(pdev, &wake, true);
 +      }
 +
 +      return 0;
 +}
 +
 +static int e1000_idle(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!e1000e_pm_ready(adapter))
 +              return 0;
 +
 +      if (adapter->idle_check) {
 +              adapter->idle_check = false;
 +              if (!e1000e_has_link(adapter))
 +                      pm_schedule_suspend(dev, MSEC_PER_SEC);
 +      }
 +
 +      return -EBUSY;
 +}
 +
 +static int e1000_runtime_resume(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!e1000e_pm_ready(adapter))
 +              return 0;
 +
 +      adapter->idle_check = !dev->power.runtime_auto;
 +      return __e1000_resume(pdev);
 +}
 +#endif /* CONFIG_PM_RUNTIME */
 +#endif /* CONFIG_PM_OPS */
  
  static void e1000_shutdown(struct pci_dev *pdev)
  {
        bool wake = false;
  
 -      __e1000_shutdown(pdev, &wake);
 +      __e1000_shutdown(pdev, &wake, false);
  
        if (system_state == SYSTEM_POWER_OFF)
                e1000_complete_shutdown(pdev, false, wake);
@@@ -5266,8 -4826,8 +5269,8 @@@ static pci_ers_result_t e1000_io_slot_r
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
 +              pdev->state_saved = true;
                pci_restore_state(pdev);
 -              pci_save_state(pdev);
  
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
@@@ -5408,16 -4968,16 +5411,16 @@@ static int __devinit e1000_probe(struc
                return err;
  
        pci_using_dac = 0;
 -      err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 +      err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
 -              err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 +              err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
 -              err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
 -                      err = pci_set_consistent_dma_mask(pdev,
 -                                                        DMA_BIT_MASK(32));
 +                      err = dma_set_coherent_mask(&pdev->dev,
 +                                                  DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
  
        e1000_print_device_info(adapter);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_set_active(&pdev->dev);
 +              pm_runtime_enable(&pdev->dev);
 +      }
 +      pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
 +
        return 0;
  
  err_register:
@@@ -5716,16 -5270,12 +5719,16 @@@ static void __devexit e1000_remove(stru
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      bool down = test_bit(__E1000_DOWN, &adapter->state);
 +
 +      pm_runtime_get_sync(&pdev->dev);
  
        /*
         * flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled
         */
 -      set_bit(__E1000_DOWN, &adapter->state);
 +      if (!down)
 +              set_bit(__E1000_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
  
        if (!(netdev->flags & IFF_UP))
                e1000_power_down_phy(adapter);
  
 +      /* Don't lie to e1000_close() down the road. */
 +      if (!down)
 +              clear_bit(__E1000_DOWN, &adapter->state);
        unregister_netdev(netdev);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_disable(&pdev->dev);
 +              pm_runtime_set_suspended(&pdev->dev);
 +      }
 +      pm_runtime_put_noidle(&pdev->dev);
 +
        /*
         * Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
@@@ -5849,22 -5390,16 +5852,22 @@@ static DEFINE_PCI_DEVICE_TABLE(e1000_pc
  };
  MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  
 +#ifdef CONFIG_PM_OPS
 +static const struct dev_pm_ops e1000_pm_ops = {
 +      SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
 +      SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
 +                              e1000_runtime_resume, e1000_idle)
 +};
 +#endif
 +
  /* PCI Device API Driver */
  static struct pci_driver e1000_driver = {
        .name     = e1000e_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
 -#ifdef CONFIG_PM
 -      /* Power Management Hooks */
 -      .suspend  = e1000_suspend,
 -      .resume   = e1000_resume,
 +#ifdef CONFIG_PM_OPS
 +      .driver.pm = &e1000_pm_ops,
  #endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
  static int __init e1000_init_module(void)
  {
        int ret;
 -      printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
 -             e1000e_driver_name, e1000e_driver_version);
 -      printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
 -             e1000e_driver_name);
 +      pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 +              e1000e_driver_version);
 +      pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
  
        return ret;
diff --combined drivers/net/gianfar.c
index 0cef967499d3f041af84ad89b213ae621b2a4907,4e97ca182997e68f242050e3990deb6339c38a88..5267c27e317413f286fcb82b68908e229c0963b0
@@@ -82,7 -82,6 +82,7 @@@
  #include <linux/tcp.h>
  #include <linux/udp.h>
  #include <linux/in.h>
 +#include <linux/net_tstamp.h>
  
  #include <asm/io.h>
  #include <asm/irq.h>
@@@ -378,13 -377,6 +378,13 @@@ static void gfar_init_mac(struct net_de
                rctrl |= RCTRL_PADDING(priv->padding);
        }
  
 +      /* Insert receive time stamps into padding alignment bytes */
 +      if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
 +              rctrl &= ~RCTRL_PAL_MASK;
 +              rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
 +              priv->padding = 8;
 +      }
 +
        /* keep vlan related bits if it's enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@@ -509,8 -501,7 +509,8 @@@ void unlock_tx_qs(struct gfar_private *
  /* Returns 1 if incoming frames use an FCB */
  static inline int gfar_uses_fcb(struct gfar_private *priv)
  {
 -      return priv->vlgrp || priv->rx_csum_enable;
 +      return priv->vlgrp || priv->rx_csum_enable ||
 +              (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
  }
  
  static void free_tx_pointers(struct gfar_private *priv)
@@@ -747,8 -738,7 +747,8 @@@ static int gfar_of_init(struct of_devic
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 -                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
 +                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 +                      FSL_GIANFAR_DEV_HAS_TIMER;
  
        ctype = of_get_property(np, "phy-connection-type", NULL);
  
@@@ -778,48 -768,6 +778,48 @@@ err_grp_init
        return err;
  }
  
 +static int gfar_hwtstamp_ioctl(struct net_device *netdev,
 +                      struct ifreq *ifr, int cmd)
 +{
 +      struct hwtstamp_config config;
 +      struct gfar_private *priv = netdev_priv(netdev);
 +
 +      if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 +              return -EFAULT;
 +
 +      /* reserved for future extensions */
 +      if (config.flags)
 +              return -EINVAL;
 +
 +      switch (config.tx_type) {
 +      case HWTSTAMP_TX_OFF:
 +              priv->hwts_tx_en = 0;
 +              break;
 +      case HWTSTAMP_TX_ON:
 +              if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 +                      return -ERANGE;
 +              priv->hwts_tx_en = 1;
 +              break;
 +      default:
 +              return -ERANGE;
 +      }
 +
 +      switch (config.rx_filter) {
 +      case HWTSTAMP_FILTER_NONE:
 +              priv->hwts_rx_en = 0;
 +              break;
 +      default:
 +              if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 +                      return -ERANGE;
 +              priv->hwts_rx_en = 1;
 +              config.rx_filter = HWTSTAMP_FILTER_ALL;
 +              break;
 +      }
 +
 +      return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 +              -EFAULT : 0;
 +}
 +
  /* Ioctl MII Interface */
  static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  {
        if (!netif_running(dev))
                return -EINVAL;
  
 +      if (cmd == SIOCSHWTSTAMP)
 +              return gfar_hwtstamp_ioctl(dev, rq, cmd);
 +
        if (!priv->phydev)
                return -ENODEV;
  
@@@ -1033,8 -978,7 +1033,8 @@@ static int gfar_probe(struct of_device 
        else
                priv->padding = 0;
  
 -      if (dev->features & NETIF_F_IP_CSUM)
 +      if (dev->features & NETIF_F_IP_CSUM ||
 +                      priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->hard_header_len += GMAC_FCB_LEN;
  
        /* Program the isrg regs only if number of grps > 1 */
@@@ -1567,9 -1511,9 +1567,9 @@@ static void gfar_halt_nodisable(struct 
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&regs->dmactrl, tempval);
  
-               while (!(gfar_read(&regs->ievent) &
-                        (IEVENT_GRSC | IEVENT_GTSC)))
-                       cpu_relax();
+               spin_event_timeout(((gfar_read(&regs->ievent) &
+                        (IEVENT_GRSC | IEVENT_GTSC)) ==
+                        (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
        }
  }
  
@@@ -1978,29 -1922,23 +1978,29 @@@ static int gfar_start_xmit(struct sk_bu
        struct netdev_queue *txq;
        struct gfar __iomem *regs = NULL;
        struct txfcb *fcb = NULL;
 -      struct txbd8 *txbdp, *txbdp_start, *base;
 +      struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
 -      int i, rq = 0;
 +      int i, rq = 0, do_tstamp = 0;
        u32 bufaddr;
        unsigned long flags;
 -      unsigned int nr_frags, length;
 -
 +      unsigned int nr_frags, nr_txbds, length;
 +      union skb_shared_tx *shtx;
  
        rq = skb->queue_mapping;
        tx_queue = priv->tx_queue[rq];
        txq = netdev_get_tx_queue(dev, rq);
        base = tx_queue->tx_bd_base;
        regs = tx_queue->grp->regs;
 +      shtx = skb_tx(skb);
 +
 +      /* check if time stamp should be generated */
 +      if (unlikely(shtx->hardware && priv->hwts_tx_en))
 +              do_tstamp = 1;
  
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
 -                      (priv->vlgrp && vlan_tx_tag_present(skb))) &&
 +                      (priv->vlgrp && vlan_tx_tag_present(skb)) ||
 +                      unlikely(do_tstamp)) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;
  
        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;
  
 +      /* calculate the required number of TxBDs for this skb */
 +      if (unlikely(do_tstamp))
 +              nr_txbds = nr_frags + 2;
 +      else
 +              nr_txbds = nr_frags + 1;
 +
        /* check if there is space to queue this packet */
 -      if ((nr_frags+1) > tx_queue->num_txbdfree) {
 +      if (nr_txbds > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
        txq->tx_packets ++;
  
        txbdp = txbdp_start = tx_queue->cur_tx;
 +      lstatus = txbdp->lstatus;
 +
 +      /* Time stamp insertion requires one additional TxBD */
 +      if (unlikely(do_tstamp))
 +              txbdp_tstamp = txbdp = next_txbd(txbdp, base,
 +                              tx_queue->tx_ring_size);
  
        if (nr_frags == 0) {
 -              lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 +              if (unlikely(do_tstamp))
 +                      txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
 +                                      TXBD_INTERRUPT);
 +              else
 +                      lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
                gfar_tx_vlan(skb, fcb);
        }
  
 -      /* setup the TxBD length and buffer pointer for the first BD */
 +      /* Setup tx hardware time stamping if requested */
 +      if (unlikely(do_tstamp)) {
 +              shtx->in_progress = 1;
 +              if (fcb == NULL)
 +                      fcb = gfar_add_fcb(skb);
 +              fcb->ptp = 1;
 +              lstatus |= BD_LFLAG(TXBD_TOE);
 +      }
 +
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
  
 -      lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 +      /*
 +       * If time stamping is requested one additional TxBD must be set up. The
 +       * first TxBD points to the FCB and must have a data length of
 +       * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
 +       * the full frame length.
 +       */
 +      if (unlikely(do_tstamp)) {
 +              txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
 +              txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
 +                              (skb_headlen(skb) - GMAC_FCB_LEN);
 +              lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 +      } else {
 +              lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 +      }
  
        /*
         * We can work in parallel with gfar_clean_tx_ring(), except
        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
  
        /* reduce TxBD free count */
 -      tx_queue->num_txbdfree -= (nr_frags + 1);
 +      tx_queue->num_txbdfree -= (nr_txbds);
  
        dev->trans_start = jiffies;
  
@@@ -2350,18 -2251,16 +2350,18 @@@ static int gfar_clean_tx_ring(struct gf
        struct net_device *dev = tx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
 -      struct txbd8 *bdp;
 +      struct txbd8 *bdp, *next = NULL;
        struct txbd8 *lbdp = NULL;
        struct txbd8 *base = tx_queue->tx_bd_base;
        struct sk_buff *skb;
        int skb_dirtytx;
        int tx_ring_size = tx_queue->tx_ring_size;
 -      int frags = 0;
 +      int frags = 0, nr_txbds = 0;
        int i;
        int howmany = 0;
        u32 lstatus;
 +      size_t buflen;
 +      union skb_shared_tx *shtx;
  
        rx_queue = priv->rx_queue[tx_queue->qindex];
        bdp = tx_queue->dirty_tx;
                unsigned long flags;
  
                frags = skb_shinfo(skb)->nr_frags;
 -              lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 +
 +              /*
 +               * When time stamping, one additional TxBD must be freed.
 +               * Also, we need to dma_unmap_single() the TxPAL.
 +               */
 +              shtx = skb_tx(skb);
 +              if (unlikely(shtx->in_progress))
 +                      nr_txbds = frags + 2;
 +              else
 +                      nr_txbds = frags + 1;
 +
 +              lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
  
                lstatus = lbdp->lstatus;
  
                                (lstatus & BD_LENGTH_MASK))
                        break;
  
 -              dma_unmap_single(&priv->ofdev->dev,
 -                              bdp->bufPtr,
 -                              bdp->length,
 -                              DMA_TO_DEVICE);
 +              if (unlikely(shtx->in_progress)) {
 +                      next = next_txbd(bdp, base, tx_ring_size);
 +                      buflen = next->length + GMAC_FCB_LEN;
 +              } else
 +                      buflen = bdp->length;
 +
 +              dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 +                              buflen, DMA_TO_DEVICE);
 +
 +              if (unlikely(shtx->in_progress)) {
 +                      struct skb_shared_hwtstamps shhwtstamps;
 +                      u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
 +                      memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 +                      shhwtstamps.hwtstamp = ns_to_ktime(*ns);
 +                      skb_tstamp_tx(skb, &shhwtstamps);
 +                      bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
 +                      bdp = next;
 +              }
  
                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                bdp = next_txbd(bdp, base, tx_ring_size);
  
                howmany++;
                spin_lock_irqsave(&tx_queue->txlock, flags);
 -              tx_queue->num_txbdfree += frags + 1;
 +              tx_queue->num_txbdfree += nr_txbds;
                spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }
  
@@@ -2596,17 -2470,6 +2596,17 @@@ static int gfar_process_frame(struct ne
                skb_pull(skb, amount_pull);
        }
  
 +      /* Get receive timestamp from the skb */
 +      if (priv->hwts_rx_en) {
 +              struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 +              u64 *ns = (u64 *) skb->data;
 +              memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 +              shhwtstamps->hwtstamp = ns_to_ktime(*ns);
 +      }
 +
 +      if (priv->padding)
 +              skb_pull(skb, priv->padding);
 +
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
  
@@@ -2643,7 -2506,8 +2643,7 @@@ int gfar_clean_rx_ring(struct gfar_priv
        bdp = rx_queue->cur_rx;
        base = rx_queue->rx_bd_base;
  
 -      amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 -              priv->padding;
 +      amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
  
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;
@@@ -2930,7 -2794,7 +2930,7 @@@ static void adjust_link(struct net_devi
   * whenever dev->flags is changed */
  static void gfar_set_multi(struct net_device *dev)
  {
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
                        return;
  
                /* Parse the list, and set the appropriate bits */
 -              netdev_for_each_mc_addr(mc_ptr, dev) {
 +              netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
 -                              gfar_set_mac_for_addr(dev, idx,
 -                                              mc_ptr->dmi_addr);
 +                              gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
 -                              gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
 +                              gfar_set_hash_for_addr(dev, ha->addr);
                }
        }
  
index 408f3d7b15450a0eafd71c4213db2d04adc80936,ccc553782a0d807eac5fbe22b69cdb467575746b..949ac1a12537eb400985afc9aa22a93735df8f2d
@@@ -1621,10 -1621,10 +1621,10 @@@ static void set_rx_mode(struct net_devi
        rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
      else {
        if (!netdev_mc_empty(dev)) {
 -          struct dev_mc_list *mc_addr;
 +          struct netdev_hw_addr *ha;
  
 -          netdev_for_each_mc_addr(mc_addr, dev) {
 -              u_int position = ether_crc(6, mc_addr->dmi_addr);
 +          netdev_for_each_mc_addr(ha, dev) {
 +              u_int position = ether_crc(6, ha->addr);
                multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
            }
        }
@@@ -1804,23 -1804,30 +1804,30 @@@ static void media_check(u_long arg
      SMC_SELECT_BANK(1);
      media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
  
+     SMC_SELECT_BANK(saved_bank);
+     spin_unlock_irqrestore(&smc->lock, flags);
      /* Check for pending interrupt with watchdog flag set: with
         this, we can limp along even if the interrupt is blocked */
      if (smc->watchdog++ && ((i>>8) & i)) {
        if (!smc->fast_poll)
            printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+       local_irq_save(flags);
        smc_interrupt(dev->irq, dev);
+       local_irq_restore(flags);
        smc->fast_poll = HZ;
      }
      if (smc->fast_poll) {
        smc->fast_poll--;
        smc->media.expires = jiffies + HZ/100;
        add_timer(&smc->media);
-       SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irqrestore(&smc->lock, flags);
        return;
      }
  
+     spin_lock_irqsave(&smc->lock, flags);
+     saved_bank = inw(ioaddr + BANK_SELECT);
      if (smc->cfg & CFG_MII_SELECT) {
        if (smc->mii_if.phy_id < 0)
            goto reschedule;
@@@ -1978,15 -1985,16 +1985,16 @@@ static int smc_get_settings(struct net_
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        int ret;
+       unsigned long flags;
  
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        if (smc->cfg & CFG_MII_SELECT)
                ret = mii_ethtool_gset(&smc->mii_if, ecmd);
        else
                ret = smc_netdev_get_ecmd(dev, ecmd);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
  }
  
@@@ -1996,15 -2004,16 +2004,16 @@@ static int smc_set_settings(struct net_
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        int ret;
+       unsigned long flags;
  
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        if (smc->cfg & CFG_MII_SELECT)
                ret = mii_ethtool_sset(&smc->mii_if, ecmd);
        else
                ret = smc_netdev_set_ecmd(dev, ecmd);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
  }
  
@@@ -2014,12 -2023,13 +2023,13 @@@ static u32 smc_get_link(struct net_devi
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        u32 ret;
+       unsigned long flags;
  
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        ret = smc_link_ok(dev);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
  }
  
@@@ -2056,16 -2066,17 +2066,17 @@@ static int smc_ioctl (struct net_devic
        int rc = 0;
        u16 saved_bank;
        unsigned int ioaddr = dev->base_addr;
+       unsigned long flags;
  
        if (!netif_running(dev))
                return -EINVAL;
  
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        saved_bank = inw(ioaddr + BANK_SELECT);
        SMC_SELECT_BANK(3);
        rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return rc;
  }
  
diff --combined drivers/net/sb1250-mac.c
index ba4770a6c2e4fef57c61d32c8f5a4674cdcadebb,04efc0c1bda93b95bfbd840c7778d8a57ac2c5ad..fec3c29b2ea8675a5664ec79167933ba70de1ee2
  #include <asm/io.h>
  #include <asm/processor.h>    /* Processor type for cache alignment. */
  
 -/* This is only here until the firmware is ready.  In that case,
 -   the firmware leaves the ethernet address in the register for us. */
 -#ifdef CONFIG_SIBYTE_STANDALONE
 -#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
 -#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
 -#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
 -#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
 -#endif
 -
 -
 -/* These identify the driver base version and may not be removed. */
 -#if 0
 -static char version1[] __initdata =
 -"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
 -#endif
 -
 -
  /* Operational parameters that usually are not changed. */
  
  #define CONFIG_SBMAC_COALESCE
@@@ -332,6 -349,7 +332,6 @@@ static int sbmac_mii_write(struct mii_b
   ********************************************************************* */
  
  static char sbmac_string[] = "sb1250-mac";
 -static char sbmac_pretty[] = "SB1250 MAC";
  
  static char sbmac_mdio_string[] = "sb1250-mac-mdio";
  
@@@ -2094,7 -2112,7 +2094,7 @@@ static void sbmac_setmulti(struct sbmac
        uint64_t reg;
        void __iomem *port;
        int idx;
 -      struct dev_mc_list *mclist;
 +      struct netdev_hw_addr *ha;
        struct net_device *dev = sc->sbm_dev;
  
        /*
         * XXX if the table overflows */
  
        idx = 1;                /* skip station address */
 -      netdev_for_each_mc_addr(mclist, dev) {
 +      netdev_for_each_mc_addr(ha, dev) {
                if (idx == MAC_ADDR_COUNT)
                        break;
 -              reg = sbmac_addr2reg(mclist->dmi_addr);
 +              reg = sbmac_addr2reg(ha->addr);
                port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
                __raw_writeq(reg, port);
                idx++;
        }
  }
  
 -#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 -/**********************************************************************
 - *  SBMAC_PARSE_XDIGIT(str)
 - *
 - *  Parse a hex digit, returning its value
 - *
 - *  Input parameters:
 - *       str - character
 - *
 - *  Return value:
 - *       hex value, or -1 if invalid
 - ********************************************************************* */
 -
 -static int sbmac_parse_xdigit(char str)
 -{
 -      int digit;
 -
 -      if ((str >= '0') && (str <= '9'))
 -              digit = str - '0';
 -      else if ((str >= 'a') && (str <= 'f'))
 -              digit = str - 'a' + 10;
 -      else if ((str >= 'A') && (str <= 'F'))
 -              digit = str - 'A' + 10;
 -      else
 -              return -1;
 -
 -      return digit;
 -}
 -
 -/**********************************************************************
 - *  SBMAC_PARSE_HWADDR(str,hwaddr)
 - *
 - *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
 - *  Ethernet address.
 - *
 - *  Input parameters:
 - *       str - string
 - *       hwaddr - pointer to hardware address
 - *
 - *  Return value:
 - *       0 if ok, else -1
 - ********************************************************************* */
 -
 -static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
 -{
 -      int digit1,digit2;
 -      int idx = 6;
 -
 -      while (*str && (idx > 0)) {
 -              digit1 = sbmac_parse_xdigit(*str);
 -              if (digit1 < 0)
 -                      return -1;
 -              str++;
 -              if (!*str)
 -                      return -1;
 -
 -              if ((*str == ':') || (*str == '-')) {
 -                      digit2 = digit1;
 -                      digit1 = 0;
 -              }
 -              else {
 -                      digit2 = sbmac_parse_xdigit(*str);
 -                      if (digit2 < 0)
 -                              return -1;
 -                      str++;
 -              }
 -
 -              *hwaddr++ = (digit1 << 4) | digit2;
 -              idx--;
 -
 -              if (*str == '-')
 -                      str++;
 -              if (*str == ':')
 -                      str++;
 -      }
 -      return 0;
 -}
 -#endif
 -
  static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
  {
        if (new_mtu >  ENET_PACKET_SIZE)
@@@ -2256,17 -2353,36 +2256,36 @@@ static int sbmac_init(struct platform_d
  
        sc->mii_bus = mdiobus_alloc();
        if (sc->mii_bus == NULL) {
-               sbmac_uninitctx(sc);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto uninit_ctx;
        }
  
+       sc->mii_bus->name = sbmac_mdio_string;
+       snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+       sc->mii_bus->priv = sc;
+       sc->mii_bus->read = sbmac_mii_read;
+       sc->mii_bus->write = sbmac_mii_write;
+       sc->mii_bus->irq = sc->phy_irq;
+       for (i = 0; i < PHY_MAX_ADDR; ++i)
+               sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+       sc->mii_bus->parent = &pldev->dev;
+       /*
+        * Probe PHY address
+        */
+       err = mdiobus_register(sc->mii_bus);
+       if (err) {
+               printk(KERN_ERR "%s: unable to register MDIO bus\n",
+                      dev->name);
+               goto free_mdio;
+       }
+       dev_set_drvdata(&pldev->dev, sc->mii_bus);
        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR "%s.%d: unable to register netdev\n",
                       sbmac_string, idx);
-               mdiobus_free(sc->mii_bus);
-               sbmac_uninitctx(sc);
-               return err;
+               goto unreg_mdio;
        }
  
        pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
        pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
               dev->name, base, eaddr);
  
-       sc->mii_bus->name = sbmac_mdio_string;
-       snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
-       sc->mii_bus->priv = sc;
-       sc->mii_bus->read = sbmac_mii_read;
-       sc->mii_bus->write = sbmac_mii_write;
-       sc->mii_bus->irq = sc->phy_irq;
-       for (i = 0; i < PHY_MAX_ADDR; ++i)
-               sc->mii_bus->irq[i] = SBMAC_PHY_INT;
-       sc->mii_bus->parent = &pldev->dev;
-       dev_set_drvdata(&pldev->dev, sc->mii_bus);
        return 0;
+ unreg_mdio:
+       mdiobus_unregister(sc->mii_bus);
+       dev_set_drvdata(&pldev->dev, NULL);
+ free_mdio:
+       mdiobus_free(sc->mii_bus);
+ uninit_ctx:
+       sbmac_uninitctx(sc);
+       return err;
  }
  
  
@@@ -2320,16 -2432,6 +2335,6 @@@ static int sbmac_open(struct net_devic
                goto out_err;
        }
  
-       /*
-        * Probe PHY address
-        */
-       err = mdiobus_register(sc->mii_bus);
-       if (err) {
-               printk(KERN_ERR "%s: unable to register MDIO bus\n",
-                      dev->name);
-               goto out_unirq;
-       }
        sc->sbm_speed = sbmac_speed_none;
        sc->sbm_duplex = sbmac_duplex_none;
        sc->sbm_fc = sbmac_fc_none;
        return 0;
  
  out_unregister:
-       mdiobus_unregister(sc->mii_bus);
- out_unirq:
        free_irq(dev->irq, dev);
  out_err:
        return err;
  }
@@@ -2553,9 -2651,6 +2554,6 @@@ static int sbmac_close(struct net_devic
  
        phy_disconnect(sc->phy_dev);
        sc->phy_dev = NULL;
-       mdiobus_unregister(sc->mii_bus);
        free_irq(dev->irq, dev);
  
        sbdma_emptyring(&(sc->sbm_txdma));
  static int sbmac_poll(struct napi_struct *napi, int budget)
  {
        struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
 -      struct net_device *dev = sc->sbm_dev;
        int work_done;
  
        work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@@ -2662,6 -2758,7 +2660,7 @@@ static int __exit sbmac_remove(struct p
  
        unregister_netdev(dev);
        sbmac_uninitctx(sc);
+       mdiobus_unregister(sc->mii_bus);
        mdiobus_free(sc->mii_bus);
        iounmap(sc->sbm_base);
        free_netdev(dev);
        return 0;
  }
  
 -
 -static struct platform_device **sbmac_pldev;
 -static int sbmac_max_units;
 -
 -#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
 -static void __init sbmac_setup_hwaddr(int idx, char *addr)
 -{
 -      void __iomem *sbm_base;
 -      unsigned long start, end;
 -      uint8_t eaddr[6];
 -      uint64_t val;
 -
 -      if (idx >= sbmac_max_units)
 -              return;
 -
 -      start = A_MAC_CHANNEL_BASE(idx);
 -      end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
 -
 -      sbm_base = ioremap_nocache(start, end - start + 1);
 -      if (!sbm_base) {
 -              printk(KERN_ERR "%s: unable to map device registers\n",
 -                     sbmac_string);
 -              return;
 -      }
 -
 -      sbmac_parse_hwaddr(addr, eaddr);
 -      val = sbmac_addr2reg(eaddr);
 -      __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
 -      val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
 -
 -      iounmap(sbm_base);
 -}
 -#endif
 -
 -static int __init sbmac_platform_probe_one(int idx)
 -{
 -      struct platform_device *pldev;
 -      struct {
 -              struct resource r;
 -              char name[strlen(sbmac_pretty) + 4];
 -      } *res;
 -      int err;
 -
 -      res = kzalloc(sizeof(*res), GFP_KERNEL);
 -      if (!res) {
 -              printk(KERN_ERR "%s.%d: unable to allocate memory\n",
 -                     sbmac_string, idx);
 -              err = -ENOMEM;
 -              goto out_err;
 -      }
 -
 -      /*
 -       * This is the base address of the MAC.
 -       */
 -      snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
 -      res->r.name = res->name;
 -      res->r.flags = IORESOURCE_MEM;
 -      res->r.start = A_MAC_CHANNEL_BASE(idx);
 -      res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
 -
 -      pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
 -      if (IS_ERR(pldev)) {
 -              printk(KERN_ERR "%s.%d: unable to register platform device\n",
 -                     sbmac_string, idx);
 -              err = PTR_ERR(pldev);
 -              goto out_kfree;
 -      }
 -
 -      if (!pldev->dev.driver) {
 -              err = 0;                /* No hardware at this address. */
 -              goto out_unregister;
 -      }
 -
 -      sbmac_pldev[idx] = pldev;
 -      return 0;
 -
 -out_unregister:
 -      platform_device_unregister(pldev);
 -
 -out_kfree:
 -      kfree(res);
 -
 -out_err:
 -      return err;
 -}
 -
 -static void __init sbmac_platform_probe(void)
 -{
 -      int i;
 -
 -      /* Set the number of available units based on the SOC type.  */
 -      switch (soc_type) {
 -      case K_SYS_SOC_TYPE_BCM1250:
 -      case K_SYS_SOC_TYPE_BCM1250_ALT:
 -              sbmac_max_units = 3;
 -              break;
 -      case K_SYS_SOC_TYPE_BCM1120:
 -      case K_SYS_SOC_TYPE_BCM1125:
 -      case K_SYS_SOC_TYPE_BCM1125H:
 -      case K_SYS_SOC_TYPE_BCM1250_ALT2:       /* Hybrid */
 -              sbmac_max_units = 2;
 -              break;
 -      case K_SYS_SOC_TYPE_BCM1x55:
 -      case K_SYS_SOC_TYPE_BCM1x80:
 -              sbmac_max_units = 4;
 -              break;
 -      default:
 -              return;                         /* none */
 -      }
 -
 -      /*
 -       * For bringup when not using the firmware, we can pre-fill
 -       * the MAC addresses using the environment variables
 -       * specified in this file (or maybe from the config file?)
 -       */
 -#ifdef SBMAC_ETH0_HWADDR
 -      sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
 -#endif
 -#ifdef SBMAC_ETH1_HWADDR
 -      sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
 -#endif
 -#ifdef SBMAC_ETH2_HWADDR
 -      sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
 -#endif
 -#ifdef SBMAC_ETH3_HWADDR
 -      sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
 -#endif
 -
 -      sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
 -                            GFP_KERNEL);
 -      if (!sbmac_pldev) {
 -              printk(KERN_ERR "%s: unable to allocate memory\n",
 -                     sbmac_string);
 -              return;
 -      }
 -
 -      /*
 -       * Walk through the Ethernet controllers and find
 -       * those who have their MAC addresses set.
 -       */
 -      for (i = 0; i < sbmac_max_units; i++)
 -              if (sbmac_platform_probe_one(i))
 -                      break;
 -}
 -
 -
 -static void __exit sbmac_platform_cleanup(void)
 -{
 -      int i;
 -
 -      for (i = 0; i < sbmac_max_units; i++)
 -              platform_device_unregister(sbmac_pldev[i]);
 -      kfree(sbmac_pldev);
 -}
 -
 -
  static struct platform_driver sbmac_driver = {
        .probe = sbmac_probe,
        .remove = __exit_p(sbmac_remove),
  
  static int __init sbmac_init_module(void)
  {
 -      int err;
 -
 -      err = platform_driver_register(&sbmac_driver);
 -      if (err)
 -              return err;
 -
 -      sbmac_platform_probe();
 -
 -      return err;
 +      return platform_driver_register(&sbmac_driver);
  }
  
  static void __exit sbmac_cleanup_module(void)
  {
 -      sbmac_platform_cleanup();
        platform_driver_unregister(&sbmac_driver);
  }
  
diff --combined drivers/net/sfc/efx.c
index bc75ef683c9f037bbb1ceeb7b9b5defa7a5dfb7f,649a264d6a81b80ee35f54850fce49a4545565c1..156460527231b4899044b714f489cb9bc357cb56
@@@ -225,17 -225,17 +225,17 @@@ static void efx_fini_channels(struct ef
   * never be concurrently called more than once on the same channel,
   * though different channels may be being processed concurrently.
   */
 -static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 +static int efx_process_channel(struct efx_channel *channel, int budget)
  {
        struct efx_nic *efx = channel->efx;
 -      int rx_packets;
 +      int spent;
  
        if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
                     !channel->enabled))
                return 0;
  
 -      rx_packets = efx_nic_process_eventq(channel, rx_quota);
 -      if (rx_packets == 0)
 +      spent = efx_nic_process_eventq(channel, budget);
 +      if (spent == 0)
                return 0;
  
        /* Deliver last RX packet. */
  
        efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
  
 -      return rx_packets;
 +      return spent;
  }
  
  /* Mark channel as finished processing
@@@ -278,17 -278,17 +278,17 @@@ static int efx_poll(struct napi_struct 
  {
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
 -      int rx_packets;
 +      int spent;
  
        EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
                  channel->channel, raw_smp_processor_id());
  
 -      rx_packets = efx_process_channel(channel, budget);
 +      spent = efx_process_channel(channel, budget);
  
 -      if (rx_packets < budget) {
 +      if (spent < budget) {
                struct efx_nic *efx = channel->efx;
  
 -              if (channel->used_flags & EFX_USED_BY_RX &&
 +              if (channel->channel < efx->n_rx_channels &&
                    efx->irq_rx_adaptive &&
                    unlikely(++channel->irq_count == 1000)) {
                        if (unlikely(channel->irq_mod_score <
                efx_channel_processed(channel);
        }
  
 -      return rx_packets;
 +      return spent;
  }
  
  /* Process the eventq of the specified channel immediately on this CPU
@@@ -333,6 -333,7 +333,6 @@@ void efx_process_channel_now(struct efx
  {
        struct efx_nic *efx = channel->efx;
  
 -      BUG_ON(!channel->used_flags);
        BUG_ON(!channel->enabled);
  
        /* Disable interrupts and wait for ISRs to complete */
@@@ -445,12 -446,12 +445,12 @@@ static void efx_set_channel_names(struc
  
        efx_for_each_channel(channel, efx) {
                number = channel->channel;
 -              if (efx->n_channels > efx->n_rx_queues) {
 -                      if (channel->channel < efx->n_rx_queues) {
 +              if (efx->n_channels > efx->n_rx_channels) {
 +                      if (channel->channel < efx->n_rx_channels) {
                                type = "-rx";
                        } else {
                                type = "-tx";
 -                              number -= efx->n_rx_queues;
 +                              number -= efx->n_rx_channels;
                        }
                }
                snprintf(channel->name, sizeof(channel->name),
@@@ -584,6 -585,8 +584,6 @@@ static void efx_remove_channel(struct e
        efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
 -
 -      channel->used_flags = 0;
  }
  
  void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@@ -953,9 -956,10 +953,9 @@@ static void efx_fini_io(struct efx_nic 
        pci_disable_device(efx->pci_dev);
  }
  
 -/* Get number of RX queues wanted.  Return number of online CPU
 - * packages in the expectation that an IRQ balancer will spread
 - * interrupts across them. */
 -static int efx_wanted_rx_queues(void)
 +/* Get number of channels wanted.  Each channel will have its own IRQ,
 + * 1 RX queue and/or 2 TX queues. */
 +static int efx_wanted_channels(void)
  {
        cpumask_var_t core_mask;
        int count;
@@@ -991,39 -995,34 +991,39 @@@ static void efx_probe_interrupts(struc
  
        if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
                struct msix_entry xentries[EFX_MAX_CHANNELS];
 -              int wanted_ints;
 -              int rx_queues;
 +              int n_channels;
  
 -              /* We want one RX queue and interrupt per CPU package
 -               * (or as specified by the rss_cpus module parameter).
 -               * We will need one channel per interrupt.
 -               */
 -              rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
 -              wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
 -              wanted_ints = min(wanted_ints, max_channels);
 +              n_channels = efx_wanted_channels();
 +              if (separate_tx_channels)
 +                      n_channels *= 2;
 +              n_channels = min(n_channels, max_channels);
  
 -              for (i = 0; i < wanted_ints; i++)
 +              for (i = 0; i < n_channels; i++)
                        xentries[i].entry = i;
 -              rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
 +              rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
                if (rc > 0) {
                        EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
 -                              " available (%d < %d).\n", rc, wanted_ints);
 +                              " available (%d < %d).\n", rc, n_channels);
                        EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
 -                      EFX_BUG_ON_PARANOID(rc >= wanted_ints);
 -                      wanted_ints = rc;
 +                      EFX_BUG_ON_PARANOID(rc >= n_channels);
 +                      n_channels = rc;
                        rc = pci_enable_msix(efx->pci_dev, xentries,
 -                                           wanted_ints);
 +                                           n_channels);
                }
  
                if (rc == 0) {
 -                      efx->n_rx_queues = min(rx_queues, wanted_ints);
 -                      efx->n_channels = wanted_ints;
 -                      for (i = 0; i < wanted_ints; i++)
 +                      efx->n_channels = n_channels;
 +                      if (separate_tx_channels) {
 +                              efx->n_tx_channels =
 +                                      max(efx->n_channels / 2, 1U);
 +                              efx->n_rx_channels =
 +                                      max(efx->n_channels -
 +                                          efx->n_tx_channels, 1U);
 +                      } else {
 +                              efx->n_tx_channels = efx->n_channels;
 +                              efx->n_rx_channels = efx->n_channels;
 +                      }
 +                      for (i = 0; i < n_channels; i++)
                                efx->channel[i].irq = xentries[i].vector;
                } else {
                        /* Fall back to single channel MSI */
  
        /* Try single interrupt MSI */
        if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
 -              efx->n_rx_queues = 1;
                efx->n_channels = 1;
 +              efx->n_rx_channels = 1;
 +              efx->n_tx_channels = 1;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx->channel[0].irq = efx->pci_dev->irq;
  
        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
 -              efx->n_rx_queues = 1;
                efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
 +              efx->n_rx_channels = 1;
 +              efx->n_tx_channels = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }
  }
@@@ -1071,24 -1068,21 +1071,24 @@@ static void efx_remove_interrupts(struc
  
  static void efx_set_channels(struct efx_nic *efx)
  {
 +      struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
 +      unsigned tx_channel_offset =
 +              separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
  
 -      efx_for_each_tx_queue(tx_queue, efx) {
 -              if (separate_tx_channels)
 -                      tx_queue->channel = &efx->channel[efx->n_channels-1];
 -              else
 -                      tx_queue->channel = &efx->channel[0];
 -              tx_queue->channel->used_flags |= EFX_USED_BY_TX;
 +      efx_for_each_channel(channel, efx) {
 +              if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
 +                      channel->tx_queue = &efx->tx_queue[
 +                              (channel->channel - tx_channel_offset) *
 +                              EFX_TXQ_TYPES];
 +                      efx_for_each_channel_tx_queue(tx_queue, channel)
 +                              tx_queue->channel = channel;
 +              }
        }
  
 -      efx_for_each_rx_queue(rx_queue, efx) {
 +      efx_for_each_rx_queue(rx_queue, efx)
                rx_queue->channel = &efx->channel[rx_queue->queue];
 -              rx_queue->channel->used_flags |= EFX_USED_BY_RX;
 -      }
  }
  
  static int efx_probe_nic(struct efx_nic *efx)
        if (rc)
                return rc;
  
 -      /* Determine the number of channels and RX queues by trying to hook
 +      /* Determine the number of channels and queues by trying to hook
         * in MSI-X interrupts. */
        efx_probe_interrupts(efx);
  
        efx_set_channels(efx);
 +      efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
  
        /* Initialise the interrupt moderation settings */
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@@ -1194,12 -1187,11 +1194,12 @@@ static void efx_start_all(struct efx_ni
        /* Mark the port as enabled so port reconfigurations can start, then
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);
 -      if (efx_dev_registered(efx))
 -              efx_wake_queue(efx);
  
 -      efx_for_each_channel(channel, efx)
 +      efx_for_each_channel(channel, efx) {
 +              if (efx_dev_registered(efx))
 +                      efx_wake_queue(channel);
                efx_start_channel(channel);
 +      }
  
        efx_nic_enable_interrupts(efx);
  
@@@ -1290,9 -1282,7 +1290,9 @@@ static void efx_stop_all(struct efx_ni
        /* Stop the kernel transmit interface late, so the watchdog
         * timer isn't ticking over the flush */
        if (efx_dev_registered(efx)) {
 -              efx_stop_queue(efx);
 +              struct efx_channel *channel;
 +              efx_for_each_channel(channel, efx)
 +                      efx_stop_queue(channel);
                netif_tx_lock_bh(efx->net_dev);
                netif_tx_unlock_bh(efx->net_dev);
        }
@@@ -1547,8 -1537,9 +1547,8 @@@ static void efx_watchdog(struct net_dev
  {
        struct efx_nic *efx = netdev_priv(net_dev);
  
 -      EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
 -              " resetting channels\n",
 -              atomic_read(&efx->netif_stop_count), efx->port_enabled);
 +      EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
 +              efx->port_enabled);
  
        efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
  }
@@@ -1612,7 -1603,7 +1612,7 @@@ static int efx_set_mac_address(struct n
  static void efx_set_multicast_list(struct net_device *net_dev)
  {
        struct efx_nic *efx = netdev_priv(net_dev);
 -      struct dev_mc_list *mc_list;
 +      struct netdev_hw_addr *ha;
        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
        u32 crc;
        int bit;
                memset(mc_hash, 0xff, sizeof(*mc_hash));
        } else {
                memset(mc_hash, 0x00, sizeof(*mc_hash));
 -              netdev_for_each_mc_addr(mc_list, net_dev) {
 -                      crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
 +              netdev_for_each_mc_addr(ha, net_dev) {
 +                      crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
                        set_bit_le(bit, mc_hash->byte);
                }
@@@ -1870,6 -1861,7 +1870,7 @@@ out
        }
  
        if (disabled) {
+               dev_close(efx->net_dev);
                EFX_ERR(efx, "has been disabled\n");
                efx->state = STATE_DISABLED;
        } else {
@@@ -1893,8 -1885,7 +1894,7 @@@ static void efx_reset_work(struct work_
        }
  
        rtnl_lock();
-       if (efx_reset(efx, efx->reset_pending))
-               dev_close(efx->net_dev);
+       (void)efx_reset(efx, efx->reset_pending);
        rtnl_unlock();
  }
  
@@@ -2023,22 -2014,22 +2023,22 @@@ static int efx_init_struct(struct efx_n
  
        efx->net_dev = net_dev;
        efx->rx_checksum_enabled = true;
 -      spin_lock_init(&efx->netif_stop_lock);
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->mac_op = type->default_mac_ops;
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
        INIT_WORK(&efx->mac_work, efx_mac_work);
 -      atomic_set(&efx->netif_stop_count, 1);
  
        for (i = 0; i < EFX_MAX_CHANNELS; i++) {
                channel = &efx->channel[i];
                channel->efx = efx;
                channel->channel = i;
                channel->work_pending = false;
 +              spin_lock_init(&channel->tx_stop_lock);
 +              atomic_set(&channel->tx_stop_count, 1);
        }
 -      for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
 +      for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
                tx_queue = &efx->tx_queue[i];
                tx_queue->efx = efx;
                tx_queue->queue = i;
@@@ -2210,7 -2201,7 +2210,7 @@@ static int __devinit efx_pci_probe(stru
        int i, rc;
  
        /* Allocate and initialise a struct net_device and struct efx_nic */
 -      net_dev = alloc_etherdev(sizeof(*efx));
 +      net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --combined drivers/net/sfc/falcon.c
index f7df24dce38a6f23f1210e94550d788da422d995,08278e7302b386145f75c408c1c3eef193981978..655b697b45b2db80a147722a0d1c91bedb093672
@@@ -175,19 -175,16 +175,19 @@@ irqreturn_t falcon_legacy_interrupt_a1(
        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
  
 -      /* Check to see if we have a serious error condition */
 -      syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 -      if (unlikely(syserr))
 -              return efx_nic_fatal_interrupt(efx);
 -
        /* Determine interrupting queues, clear interrupt status
         * register and acknowledge the device interrupt.
         */
        BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
        queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
 +
 +      /* Check to see if we have a serious error condition */
 +      if (queues & (1U << efx->fatal_irq_level)) {
 +              syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 +              if (unlikely(syserr))
 +                      return efx_nic_fatal_interrupt(efx);
 +      }
 +
        EFX_ZERO_OWORD(*int_ker);
        wmb(); /* Ensure the vector is cleared before interrupt ack */
        falcon_irq_ack_a1(efx);
@@@ -507,9 -504,6 +507,9 @@@ static void falcon_reset_macs(struct ef
        /* Ensure the correct MAC is selected before statistics
         * are re-enabled by the caller */
        efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
 +
 +      /* This can run even when the GMAC is selected */
 +      falcon_setup_xaui(efx);
  }
  
  void falcon_drain_tx_fifo(struct efx_nic *efx)
@@@ -1326,7 -1320,9 +1326,9 @@@ static int falcon_probe_nvconfig(struc
  
        EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
  
-       falcon_probe_board(efx, board_rev);
+       rc = falcon_probe_board(efx, board_rev);
+       if (rc)
+               goto fail2;
  
        kfree(nvconfig);
        return 0;
diff --combined drivers/net/sfc/nic.h
index 5825f37b51bd1801fe79fd6de724e32d649c0287,3166bafdfbefc617d6df6fe1cf1589008c66b19f..bbc2c0c2f8430d15f386fc93807bc8b86c33f245
@@@ -135,14 -135,12 +135,14 @@@ static inline struct falcon_board *falc
   * @fw_build: Firmware build number
   * @mcdi: Management-Controller-to-Driver Interface
   * @wol_filter_id: Wake-on-LAN packet filter id
 + * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
   */
  struct siena_nic_data {
        u64 fw_version;
        u32 fw_build;
        struct efx_mcdi_iface mcdi;
        int wol_filter_id;
 +      u8 ipv6_rss_key[40];
  };
  
  extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@@ -158,7 -156,7 +158,7 @@@ extern struct efx_nic_type siena_a0_nic
   **************************************************************************
   */
  
- extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+ extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
  
  /* TX data path */
  extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
@@@ -205,7 -203,6 +205,7 @@@ extern void falcon_irq_ack_a1(struct ef
  extern int efx_nic_flush_queues(struct efx_nic *efx);
  extern void falcon_start_nic_stats(struct efx_nic *efx);
  extern void falcon_stop_nic_stats(struct efx_nic *efx);
 +extern void falcon_setup_xaui(struct efx_nic *efx);
  extern int falcon_reset_xaui(struct efx_nic *efx);
  extern void efx_nic_init_common(struct efx_nic *efx);
  
diff --combined drivers/net/sfc/siena.c
index 7bf93faff3ab28b1770efb2ebc4094a1d9120da9,e0c46f59d1f8793d365710d1e80d6c51ae469b9d..727b4228e0819bfe336e38ed6504bd8c2d852edc
@@@ -13,7 -13,6 +13,7 @@@
  #include <linux/pci.h>
  #include <linux/module.h>
  #include <linux/slab.h>
 +#include <linux/random.h>
  #include "net_driver.h"
  #include "bitfield.h"
  #include "efx.h"
@@@ -275,9 -274,6 +275,9 @@@ static int siena_probe_nic(struct efx_n
                goto fail5;
        }
  
 +      get_random_bytes(&nic_data->ipv6_rss_key,
 +                       sizeof(nic_data->ipv6_rss_key));
 +
        return 0;
  
  fail5:
@@@ -297,7 -293,6 +297,7 @@@ fail1
   */
  static int siena_init_nic(struct efx_nic *efx)
  {
 +      struct siena_nic_data *nic_data = efx->nic_data;
        efx_oword_t temp;
        int rc;
  
        EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
        efx_writeo(efx, &temp, FR_AZ_RX_CFG);
  
 +      /* Enable IPv6 RSS */
 +      BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
 +                   2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
 +                   FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
 +      memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
 +      memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
 +      EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
 +                           FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
 +      memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
 +             FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
 +      efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 +
        if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
                /* No MCDI operation has been defined to set thresholds */
                EFX_ERR(efx, "ignoring RX flow control thresholds\n");
@@@ -475,8 -456,17 +475,17 @@@ static int siena_try_update_nic_stats(s
  
  static void siena_update_nic_stats(struct efx_nic *efx)
  {
-       while (siena_try_update_nic_stats(efx) == -EAGAIN)
-               cpu_relax();
+       int retry;
+       /* If we're unlucky enough to read statistics during the DMA, wait
+        * up to 10ms for it to finish (typically takes <500us) */
+       for (retry = 0; retry < 100; ++retry) {
+               if (siena_try_update_nic_stats(efx) == 0)
+                       return;
+               udelay(100);
+       }
+       /* Use the old values instead */
  }
  
  static void siena_start_nic_stats(struct efx_nic *efx)
index 811b2dc423d1fac523fc9a2822a5d4276d786b74,3547cf13d219c07ca56e7468ac2905b830c4959e..b3fe0de4046970dc27fbc31b478b9cce42cf9c63
@@@ -64,11 -64,6 +64,11 @@@ static int is_wireless_rndis(struct usb
  
  #endif
  
 +static const u8 mbm_guid[16] = {
 +      0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
 +      0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
 +};
 +
  /*
   * probes control interface, claims data interface, collects the bulk
   * endpoints, activates data interface (if needed), maybe sets MTU.
@@@ -84,8 -79,6 +84,8 @@@ int usbnet_generic_cdc_bind(struct usbn
        int                             status;
        int                             rndis;
        struct usb_driver               *driver = driver_of(intf);
 +      struct usb_cdc_mdlm_desc        *desc = NULL;
 +      struct usb_cdc_mdlm_detail_desc *detail = NULL;
  
        if (sizeof dev->data < sizeof *info)
                return -EDOM;
                         * side link address we were given.
                         */
                        break;
 +              case USB_CDC_MDLM_TYPE:
 +                      if (desc) {
 +                              dev_dbg(&intf->dev, "extra MDLM descriptor\n");
 +                              goto bad_desc;
 +                      }
 +
 +                      desc = (void *)buf;
 +
 +                      if (desc->bLength != sizeof(*desc))
 +                              goto bad_desc;
 +
 +                      if (memcmp(&desc->bGUID, mbm_guid, 16))
 +                              goto bad_desc;
 +                      break;
 +              case USB_CDC_MDLM_DETAIL_TYPE:
 +                      if (detail) {
 +                              dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
 +                              goto bad_desc;
 +                      }
 +
 +                      detail = (void *)buf;
 +
 +                      if (detail->bGuidDescriptorType == 0) {
 +                              if (detail->bLength < (sizeof(*detail) + 1))
 +                                      goto bad_desc;
 +                      } else
 +                              goto bad_desc;
 +                      break;
                }
  next_desc:
                len -= buf [0]; /* bLength */
@@@ -466,6 -431,7 +466,7 @@@ static const struct driver_info mbm_inf
        .bind =         cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       cdc_status,
+       .manage_power = cdc_manage_power,
  };
  
  /*-------------------------------------------------------------------------*/
@@@ -577,10 -543,80 +578,10 @@@ static const struct usb_device_id       produ
                        USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long) &cdc_info,
  }, {
 -      /* Ericsson F3507g */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3507g ver. 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3607gw */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3607gw ver 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3607gw ver 3 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3307 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson F3307 ver 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson C3607w */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Ericsson C3607w ver 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Toshiba F3507g */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Toshiba F3607gw */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Toshiba F3607gw ver 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Dell F3507g */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Dell F3607gw */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 -}, {
 -      /* Dell F3607gw ver 2 */
 -      USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
 -                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 -      .driver_info = (unsigned long) &mbm_info,
 +      USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
 +                      USB_CDC_PROTO_NONE),
 +      .driver_info = (unsigned long)&mbm_info,
 +
  },
        { },            // END
  };
index 679da7e7522ebe3383e8f014065c4714d9d3aaf7,c24067f1a0cb526560f40247aa647c1c57e2f734..ca42ccb23d766ac4204f75cda33aed63915b350f
@@@ -132,7 -132,7 +132,7 @@@ static int p54p_upload_firmware(struct 
  
  static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
        int ring_index, struct p54p_desc *ring, u32 ring_limit,
 -      struct sk_buff **rx_buf)
 +      struct sk_buff **rx_buf, u32 index)
  {
        struct p54p_priv *priv = dev->priv;
        struct p54p_ring_control *ring_control = priv->ring_control;
  
        idx = le32_to_cpu(ring_control->host_idx[ring_index]);
        limit = idx;
 -      limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
 +      limit -= index;
        limit = ring_limit - limit;
  
        i = idx % ring_limit;
@@@ -232,7 -232,7 +232,7 @@@ static void p54p_check_rx_ring(struct i
                i %= ring_limit;
        }
  
 -      p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
 +      p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
  }
  
  static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
        u32 idx, i;
  
        i = (*index) % ring_limit;
-       (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
+       (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
        idx %= ring_limit;
  
        while (i != idx) {
@@@ -277,6 -277,14 +277,6 @@@ static void p54p_tasklet(unsigned long 
        struct p54p_priv *priv = dev->priv;
        struct p54p_ring_control *ring_control = priv->ring_control;
  
 -      p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
 -                         ARRAY_SIZE(ring_control->tx_mgmt),
 -                         priv->tx_buf_mgmt);
 -
 -      p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
 -                         ARRAY_SIZE(ring_control->tx_data),
 -                         priv->tx_buf_data);
 -
        p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
                ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
  
  
        wmb();
        P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
 +
 +      p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
 +                         ARRAY_SIZE(ring_control->tx_mgmt),
 +                         priv->tx_buf_mgmt);
 +
 +      p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
 +                         ARRAY_SIZE(ring_control->tx_data),
 +                         priv->tx_buf_data);
  }
  
  static irqreturn_t p54p_interrupt(int irq, void *dev_id)
@@@ -445,10 -445,10 +445,10 @@@ static int p54p_open(struct ieee80211_h
        priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
  
        p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
 -              ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
 +              ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
  
        p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
 -              ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
 +              ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
  
        P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
        P54P_READ(ring_control_base);
diff --combined include/net/sctp/sctp.h
index 59151557406ce2967a3d5441f05cde0bc592fab1,fa6cde578a1d122a43a0f63e07969a36cf3e27ff..e9a408718636712c884707828ffcf854d29307fe
@@@ -128,6 -128,7 +128,7 @@@ extern int sctp_register_pf(struct sctp
  int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
  int sctp_inet_listen(struct socket *sock, int backlog);
  void sctp_write_space(struct sock *sk);
+ void sctp_data_ready(struct sock *sk, int len);
  unsigned int sctp_poll(struct file *file, struct socket *sock,
                poll_table *wait);
  void sctp_sock_rfree(struct sk_buff *skb);
@@@ -268,7 -269,7 +269,7 @@@ enum 
  #define SCTP_MIB_MAX    __SCTP_MIB_MAX
  struct sctp_mib {
          unsigned long   mibs[SCTP_MIB_MAX];
 -} __SNMP_MIB_ALIGN__;
 +};
  
  
  /* Print debugging messages.  */
diff --combined include/net/sock.h
index cc7f91ec972c8053de4ebec1d800916406c910c3,1ad6435f252eda64b17e6b4568c58f42a0eed6d0..328e03f47dd1b1df8929bf2daed2204f03a35fd7
@@@ -74,7 -74,7 +74,7 @@@
                                        printk(KERN_DEBUG msg); } while (0)
  #else
  /* Validate arguments and do nothing */
- static void inline int __attribute__ ((format (printf, 2, 3)))
+ static inline void __attribute__ ((format (printf, 2, 3)))
  SOCK_DEBUG(struct sock *sk, const char *msg, ...)
  {
  }
@@@ -159,7 -159,7 +159,7 @@@ struct sock_common 
    *   @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
    *   @sk_lock:       synchronizer
    *   @sk_rcvbuf: size of receive buffer in bytes
 -  *   @sk_sleep: sock wait queue
 +  *   @sk_wq: sock wait queue and async head
    *   @sk_dst_cache: destination cache
    *   @sk_dst_lock: destination cache lock
    *   @sk_policy: flow policy
    *   @sk_rcvlowat: %SO_RCVLOWAT setting
    *   @sk_rcvtimeo: %SO_RCVTIMEO setting
    *   @sk_sndtimeo: %SO_SNDTIMEO setting
 +  *   @sk_rxhash: flow hash received from netif layer
    *   @sk_filter: socket filtering instructions
    *   @sk_protinfo: private area, net family specific, when not using slab
    *   @sk_timer: sock cleanup timer
@@@ -256,13 -255,14 +256,13 @@@ struct sock 
                struct sk_buff *head;
                struct sk_buff *tail;
                int len;
 -              int limit;
        } sk_backlog;
 -      wait_queue_head_t       *sk_sleep;
 +      struct socket_wq        *sk_wq;
        struct dst_entry        *sk_dst_cache;
  #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
  #endif
 -      rwlock_t                sk_dst_lock;
 +      spinlock_t              sk_dst_lock;
        atomic_t                sk_rmem_alloc;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
 +#ifdef CONFIG_RPS
 +      __u32                   sk_rxhash;
 +#endif
        unsigned long           sk_flags;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
@@@ -607,20 -604,10 +607,20 @@@ static inline void __sk_add_backlog(str
        skb->next = NULL;
  }
  
 +/*
 + * Take into account size of receive queue and backlog queue
 + */
 +static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 +{
 +      unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 +
 +      return qsize + skb->truesize > sk->sk_rcvbuf;
 +}
 +
  /* The per-socket spinlock must be held here. */
  static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  {
 -      if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
 +      if (sk_rcvqueues_full(sk, skb))
                return -ENOBUFS;
  
        __sk_add_backlog(sk, skb);
@@@ -633,40 -620,6 +633,40 @@@ static inline int sk_backlog_rcv(struc
        return sk->sk_backlog_rcv(sk, skb);
  }
  
 +static inline void sock_rps_record_flow(const struct sock *sk)
 +{
 +#ifdef CONFIG_RPS
 +      struct rps_sock_flow_table *sock_flow_table;
 +
 +      rcu_read_lock();
 +      sock_flow_table = rcu_dereference(rps_sock_flow_table);
 +      rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
 +      rcu_read_unlock();
 +#endif
 +}
 +
 +static inline void sock_rps_reset_flow(const struct sock *sk)
 +{
 +#ifdef CONFIG_RPS
 +      struct rps_sock_flow_table *sock_flow_table;
 +
 +      rcu_read_lock();
 +      sock_flow_table = rcu_dereference(rps_sock_flow_table);
 +      rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
 +      rcu_read_unlock();
 +#endif
 +}
 +
 +static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
 +{
 +#ifdef CONFIG_RPS
 +      if (unlikely(sk->sk_rxhash != rxhash)) {
 +              sock_rps_reset_flow(sk);
 +              sk->sk_rxhash = rxhash;
 +      }
 +#endif
 +}
 +
  #define sk_wait_event(__sk, __timeo, __condition)                     \
        ({      int __rc;                                               \
                release_sock(__sk);                                     \
@@@ -1021,16 -974,6 +1021,16 @@@ extern void release_sock(struct sock *s
                                SINGLE_DEPTH_NESTING)
  #define bh_unlock_sock(__sk)  spin_unlock(&((__sk)->sk_lock.slock))
  
 +static inline void lock_sock_bh(struct sock *sk)
 +{
 +      spin_lock_bh(&sk->sk_lock.slock);
 +}
 +
 +static inline void unlock_sock_bh(struct sock *sk)
 +{
 +      spin_unlock_bh(&sk->sk_lock.slock);
 +}
 +
  extern struct sock            *sk_alloc(struct net *net, int family,
                                          gfp_t priority,
                                          struct proto *prot);
@@@ -1217,10 -1160,6 +1217,10 @@@ static inline void sk_set_socket(struc
        sk->sk_socket = sock;
  }
  
 +static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 +{
 +      return &sk->sk_wq->wait;
 +}
  /* Detach socket from process context.
   * Announce socket dead, detach it from wait queue and inode.
   * Note that parent inode held reference count on this struct sock,
@@@ -1233,14 -1172,14 +1233,14 @@@ static inline void sock_orphan(struct s
        write_lock_bh(&sk->sk_callback_lock);
        sock_set_flag(sk, SOCK_DEAD);
        sk_set_socket(sk, NULL);
 -      sk->sk_sleep  = NULL;
 +      sk->sk_wq  = NULL;
        write_unlock_bh(&sk->sk_callback_lock);
  }
  
  static inline void sock_graft(struct sock *sk, struct socket *parent)
  {
        write_lock_bh(&sk->sk_callback_lock);
 -      sk->sk_sleep = &parent->wait;
 +      rcu_assign_pointer(sk->sk_wq, parent->wq);
        parent->sk = sk;
        sk_set_socket(sk, parent);
        security_sock_graft(sk, parent);
@@@ -1253,9 -1192,7 +1253,9 @@@ extern unsigned long sock_i_ino(struct 
  static inline struct dst_entry *
  __sk_dst_get(struct sock *sk)
  {
 -      return sk->sk_dst_cache;
 +      return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
 +                                                     sock_owned_by_user(sk) ||
 +                                                     lockdep_is_held(&sk->sk_lock.slock));
  }
  
  static inline struct dst_entry *
@@@ -1263,65 -1200,50 +1263,65 @@@ sk_dst_get(struct sock *sk
  {
        struct dst_entry *dst;
  
 -      read_lock(&sk->sk_dst_lock);
 -      dst = sk->sk_dst_cache;
 +      rcu_read_lock();
 +      dst = rcu_dereference(sk->sk_dst_cache);
        if (dst)
                dst_hold(dst);
 -      read_unlock(&sk->sk_dst_lock);
 +      rcu_read_unlock();
        return dst;
  }
  
 +extern void sk_reset_txq(struct sock *sk);
 +
 +static inline void dst_negative_advice(struct sock *sk)
 +{
 +      struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 +
 +      if (dst && dst->ops->negative_advice) {
 +              ndst = dst->ops->negative_advice(dst);
 +
 +              if (ndst != dst) {
 +                      rcu_assign_pointer(sk->sk_dst_cache, ndst);
 +                      sk_reset_txq(sk);
 +              }
 +      }
 +}
 +
  static inline void
  __sk_dst_set(struct sock *sk, struct dst_entry *dst)
  {
        struct dst_entry *old_dst;
  
        sk_tx_queue_clear(sk);
 -      old_dst = sk->sk_dst_cache;
 -      sk->sk_dst_cache = dst;
 +      /*
 +       * This can be called while sk is owned by the caller only,
 +       * with no state that can be checked in a rcu_dereference_check() cond
 +       */
 +      old_dst = rcu_dereference_raw(sk->sk_dst_cache);
 +      rcu_assign_pointer(sk->sk_dst_cache, dst);
        dst_release(old_dst);
  }
  
  static inline void
  sk_dst_set(struct sock *sk, struct dst_entry *dst)
  {
 -      write_lock(&sk->sk_dst_lock);
 +      spin_lock(&sk->sk_dst_lock);
        __sk_dst_set(sk, dst);
 -      write_unlock(&sk->sk_dst_lock);
 +      spin_unlock(&sk->sk_dst_lock);
  }
  
  static inline void
  __sk_dst_reset(struct sock *sk)
  {
 -      struct dst_entry *old_dst;
 -
 -      sk_tx_queue_clear(sk);
 -      old_dst = sk->sk_dst_cache;
 -      sk->sk_dst_cache = NULL;
 -      dst_release(old_dst);
 +      __sk_dst_set(sk, NULL);
  }
  
  static inline void
  sk_dst_reset(struct sock *sk)
  {
 -      write_lock(&sk->sk_dst_lock);
 +      spin_lock(&sk->sk_dst_lock);
        __sk_dst_reset(sk);
 -      write_unlock(&sk->sk_dst_lock);
 +      spin_unlock(&sk->sk_dst_lock);
  }
  
  extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
@@@ -1392,12 -1314,12 +1392,12 @@@ static inline int sk_has_allocations(co
  }
  
  /**
 - * sk_has_sleeper - check if there are any waiting processes
 - * @sk: socket
 + * wq_has_sleeper - check if there are any waiting processes
 + * @wq: struct socket_wq
   *
 - * Returns true if socket has waiting processes
 + * Returns true if socket_wq has waiting processes
   *
 - * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
 + * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
   * barrier call. They were added due to the race found within the tcp code.
   *
   * Consider following tcp code paths:
   *   ...                 ...
   *   tp->rcv_nxt check   sock_def_readable
   *   ...                 {
 - *   schedule               ...
 - *                          if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 - *                              wake_up_interruptible(sk->sk_sleep)
 + *   schedule               rcu_read_lock();
 + *                          wq = rcu_dereference(sk->sk_wq);
 + *                          if (wq && waitqueue_active(&wq->wait))
 + *                              wake_up_interruptible(&wq->wait)
   *                          ...
   *                       }
   *
   * could then endup calling schedule and sleep forever if there are no more
   * data on the socket.
   *
 - * The sk_has_sleeper is always called right after a call to read_lock, so we
 - * can use smp_mb__after_lock barrier.
   */
 -static inline int sk_has_sleeper(struct sock *sk)
 +static inline bool wq_has_sleeper(struct socket_wq *wq)
  {
 +
        /*
         * We need to be sure we are in sync with the
         * add_wait_queue modifications to the wait queue.
         *
         * This memory barrier is paired in the sock_poll_wait.
         */
 -      smp_mb__after_lock();
 -      return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
 +      smp_mb();
 +      return wq && waitqueue_active(&wq->wait);
  }
  
  /**
   * @wait_address:   socket wait queue
   * @p:              poll_table
   *
 - * See the comments in the sk_has_sleeper function.
 + * See the comments in the wq_has_sleeper function.
   */
  static inline void sock_poll_wait(struct file *filp,
                wait_queue_head_t *wait_address, poll_table *p)
                 * We need to be sure we are in sync with the
                 * socket flags modification.
                 *
 -               * This memory barrier is paired in the sk_has_sleeper.
 +               * This memory barrier is paired in the wq_has_sleeper.
                */
                smp_mb();
        }
@@@ -1635,24 -1557,7 +1635,24 @@@ sock_recv_timestamp(struct msghdr *msg
                sk->sk_stamp = kt;
  }
  
 -extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);
 +extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 +                                   struct sk_buff *skb);
 +
 +static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 +                                        struct sk_buff *skb)
 +{
 +#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)                     | \
 +                         (1UL << SOCK_RCVTSTAMP)                      | \
 +                         (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)       | \
 +                         (1UL << SOCK_TIMESTAMPING_SOFTWARE)          | \
 +                         (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)      | \
 +                         (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
 +
 +      if (sk->sk_flags & FLAGS_TS_OR_DROPS)
 +              __sock_recv_ts_and_drops(msg, sk, skb);
 +      else
 +              sk->sk_stamp = skb->tstamp;
 +}
  
  /**
   * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
diff --combined net/bluetooth/l2cap.c
index c1e60eed5a97a785f2a4547cb0684d8b58dd35cf,9753b690a8b356b9bd24e88efb45d8d6e2b5ca56..864c76f4a678582c81fb9d37c0586f28bd6962c5
@@@ -1147,7 -1147,7 +1147,7 @@@ static int l2cap_sock_accept(struct soc
        BT_DBG("sk %p timeo %ld", sk, timeo);
  
        /* Wait for an incoming connection. (wake-one). */
 -      add_wait_queue_exclusive(sk->sk_sleep, &wait);
 +      add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = bt_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                }
        }
        set_current_state(TASK_RUNNING);
 -      remove_wait_queue(sk->sk_sleep, &wait);
 +      remove_wait_queue(sk_sleep(sk), &wait);
  
        if (err)
                goto done;
@@@ -1626,7 -1626,10 +1626,10 @@@ static int l2cap_sock_sendmsg(struct ki
        /* Connectionless channel */
        if (sk->sk_type == SOCK_DGRAM) {
                skb = l2cap_create_connless_pdu(sk, msg, len);
-               err = l2cap_do_send(sk, skb);
+               if (IS_ERR(skb))
+                       err = PTR_ERR(skb);
+               else
+                       err = l2cap_do_send(sk, skb);
                goto done;
        }
  
index 78cbc39f56c4f1f1cd140a79d3c651b6b880384f,8da6429269dddd922f9c3fb4eda2b86a7bffe0d2..e0a3e3537b147fd263611f4109da28f3157ae60c
@@@ -70,17 -70,13 +70,13 @@@ int inet_csk_bind_conflict(const struc
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-                       const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
+                               const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr ||
                                    sk2_rcv_saddr == sk_rcv_saddr)
                                        break;
-                       } else if (reuse && sk2->sk_reuse &&
-                                  sk2_rcv_saddr &&
-                                  sk2_rcv_saddr == sk_rcv_saddr)
-                               break;
+                       }
                }
        }
        return node != NULL;
@@@ -124,11 -120,9 +120,9 @@@ again
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
                                                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
-                                                       if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
-                                                               spin_unlock(&head->lock);
-                                                               snum = smallest_rover;
-                                                               goto have_snum;
-                                                       }
+                                                       spin_unlock(&head->lock);
+                                                       snum = smallest_rover;
+                                                       goto have_snum;
                                                }
                                        }
                                        goto next;
@@@ -240,7 -234,7 +234,7 @@@ static int inet_csk_wait_for_connect(st
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
 -              prepare_to_wait_exclusive(sk->sk_sleep, &wait,
 +              prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                if (!timeo)
                        break;
        }
 -      finish_wait(sk->sk_sleep, &wait);
 +      finish_wait(sk_sleep(sk), &wait);
        return err;
  }
  
index 9ca1efc923a1a05ff85079bc3e0e0ba37c3f68d3,628db24bcf22bceff783384ea7779a87c4cb0ab7..0c5e3c3b7fd56d87222ec0c5a691f1e961a9f58b
@@@ -42,16 -42,11 +42,11 @@@ int inet6_csk_bind_conflict(const struc
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
-                    sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-                       if ((!sk->sk_reuse || !sk2->sk_reuse ||
-                            sk2->sk_state == TCP_LISTEN) &&
-                            ipv6_rcv_saddr_equal(sk, sk2))
-                               break;
-                       else if (sk->sk_reuse && sk2->sk_reuse &&
-                               !ipv6_addr_any(inet6_rcv_saddr(sk)) &&
-                               ipv6_rcv_saddr_equal(sk, sk2))
-                               break;
-               }
+                    sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
+                   (!sk->sk_reuse || !sk2->sk_reuse ||
+                    sk2->sk_state == TCP_LISTEN) &&
+                    ipv6_rcv_saddr_equal(sk, sk2))
+                       break;
        }
  
        return node != NULL;
@@@ -183,7 -178,7 +178,7 @@@ struct dst_entry *__inet6_csk_dst_check
        return dst;
  }
  
 -int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 +int inet6_csk_xmit(struct sk_buff *skb)
  {
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        /* Restore final destination back after routing done */
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
  
 -      return ip6_xmit(sk, skb, &fl, np->opt, 0);
 +      return ip6_xmit(sk, skb, &fl, np->opt);
  }
  
  EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --combined net/sctp/socket.c
index d54700af927ade324abba113d107857e70229de9,44a1ab03a3f0124ac33c63aa62f54b5663b52914..bae764005d2df19bf2d483edde15ec56eb4d0a68
@@@ -3719,9 -3719,12 +3719,9 @@@ SCTP_STATIC int sctp_init_sock(struct s
        sp->hmac = NULL;
  
        SCTP_DBG_OBJCNT_INC(sock);
-       percpu_counter_inc(&sctp_sockets_allocated);
  
 -      /* Set socket backlog limit. */
 -      sk->sk_backlog.limit = sysctl_sctp_rmem[1];
 -
        local_bh_disable();
+       percpu_counter_inc(&sctp_sockets_allocated);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        local_bh_enable();
  
@@@ -3738,8 -3741,8 +3738,8 @@@ SCTP_STATIC void sctp_destroy_sock(stru
        /* Release our hold on the endpoint. */
        ep = sctp_sk(sk)->ep;
        sctp_endpoint_free(ep);
-       percpu_counter_dec(&sctp_sockets_allocated);
        local_bh_disable();
+       percpu_counter_dec(&sctp_sockets_allocated);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
  }
@@@ -5479,6 -5482,7 +5479,6 @@@ pp_found
                 */
                int reuse = sk->sk_reuse;
                struct sock *sk2;
 -              struct hlist_node *node;
  
                SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
                if (pp->fastreuse && sk->sk_reuse &&
@@@ -5699,7 -5703,7 +5699,7 @@@ unsigned int sctp_poll(struct file *fil
        struct sctp_sock *sp = sctp_sk(sk);
        unsigned int mask;
  
 -      poll_wait(file, sk->sk_sleep, wait);
 +      poll_wait(file, sk_sleep(sk), wait);
  
        /* A TCP-style listening socket becomes readable when the accept queue
         * is not empty.
@@@ -5940,7 -5944,7 +5940,7 @@@ static int sctp_wait_for_packet(struct 
        int error;
        DEFINE_WAIT(wait);
  
 -      prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 +      prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  
        /* Socket errors? */
        error = sock_error(sk);
        sctp_lock_sock(sk);
  
  ready:
 -      finish_wait(sk->sk_sleep, &wait);
 +      finish_wait(sk_sleep(sk), &wait);
        return 0;
  
  interrupted:
        error = sock_intr_errno(*timeo_p);
  
  out:
 -      finish_wait(sk->sk_sleep, &wait);
 +      finish_wait(sk_sleep(sk), &wait);
        *err = error;
        return error;
  }
@@@ -6058,14 -6062,14 +6058,14 @@@ static void __sctp_write_space(struct s
                        wake_up_interruptible(&asoc->wait);
  
                if (sctp_writeable(sk)) {
 -                      if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 -                              wake_up_interruptible(sk->sk_sleep);
 +                      if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
 +                              wake_up_interruptible(sk_sleep(sk));
  
                        /* Note that we try to include the Async I/O support
                         * here by modeling from the current TCP/UDP code.
                         * We have not tested with it yet.
                         */
 -                      if (sock->fasync_list &&
 +                      if (sock->wq->fasync_list &&
                            !(sk->sk_shutdown & SEND_SHUTDOWN))
                                sock_wake_async(sock,
                                                SOCK_WAKE_SPACE, POLL_OUT);
@@@ -6185,6 -6189,16 +6185,19 @@@ do_nonblock
        goto out;
  }
  
 -      read_lock_bh(&sk->sk_callback_lock);
 -      if (sk_has_sleeper(sk))
 -              wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ void sctp_data_ready(struct sock *sk, int len)
+ {
 -      read_unlock_bh(&sk->sk_callback_lock);
++      struct socket_wq *wq;
++
++      rcu_read_lock();
++      wq = rcu_dereference(sk->sk_wq);
++      if (wq_has_sleeper(wq))
++              wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+                                               POLLRDNORM | POLLRDBAND);
+       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
++      rcu_read_unlock();
+ }
  /* If socket sndbuf has changed, wake up all per association waiters.  */
  void sctp_write_space(struct sock *sk)
  {
@@@ -6293,7 -6307,7 +6306,7 @@@ static int sctp_wait_for_accept(struct 
  
  
        for (;;) {
 -              prepare_to_wait_exclusive(sk->sk_sleep, &wait,
 +              prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
  
                if (list_empty(&ep->asocs)) {
                        break;
        }
  
 -      finish_wait(sk->sk_sleep, &wait);
 +      finish_wait(sk_sleep(sk), &wait);
  
        return err;
  }
@@@ -6329,7 -6343,7 +6342,7 @@@ static void sctp_wait_for_close(struct 
        DEFINE_WAIT(wait);
  
        do {
 -              prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 +              prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (list_empty(&sctp_sk(sk)->ep->asocs))
                        break;
                sctp_release_sock(sk);
                sctp_lock_sock(sk);
        } while (!signal_pending(current) && timeout);
  
 -      finish_wait(sk->sk_sleep, &wait);
 +      finish_wait(sk_sleep(sk), &wait);
  }
  
  static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)