Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Apr 2012 17:37:38 +0000 (10:37 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Apr 2012 17:37:38 +0000 (10:37 -0700)
Pull networking updates from David Miller:

 1) Fix inaccuracies in network driver interface documentation, from Ben
    Hutchings.

 2) Fix handling of negative offsets in BPF JITs, from Jan Seiffert.
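
    A minimal sketch of the dispatch this introduces (modelled on the
    CHOOSE_LOAD_FUNC macro in the bpf_jit_comp.c hunk below; the stub
    names here are stand-ins, not the real generated symbols):

        #include <linux/filter.h>       /* SKF_LL_OFF = -0x200000 */

        extern void *pos_stub, *neg_stub, *checked_stub;

        static void *choose_load(int k)
        {
                if (k >= 0)
                        return pos_stub;     /* direct packet load */
                if (k >= SKF_LL_OFF)
                        return neg_stub;     /* ancillary/link-layer load */
                return checked_stub;         /* re-validated at run time */
        }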

 3) Compile warning, locking, and refcounting fixes in netfilter's
    xt_CT, from Pablo Neira Ayuso.

 4) phonet sendmsg needs to validate user length just like any other
    datagram protocol, fix from Sasha Levin.
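
    As a sketch of that rule (hypothetical names, not the Phonet code
    itself): bound the user-supplied length before it ever reaches the
    allocator.

        #define MY_MAX_DGRAM_SIZE 576   /* hypothetical protocol limit */

        static int do_alloc_and_copy(struct sock *sk, struct msghdr *msg,
                                     size_t len);  /* hypothetical */

        static int my_sendmsg(struct sock *sk, struct msghdr *msg,
                              size_t len)
        {
                if (len > MY_MAX_DGRAM_SIZE)
                        return -EMSGSIZE;    /* reject before allocating */
                return do_alloc_and_copy(sk, msg, len);
        }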

 5) IPv6 multicast code uses the wrong loop index, from RongQing Li.

 6) Link handling and firmware fixes in bnx2x driver from Yaniv Rosner
    and Yuval Mintz.

 7) mlx4 erroneously allocates 4 pages at a time, regardless of page
    size, fix from Thadeu Lima de Souza Cascardo.
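
    With the stock page-allocator helpers, the idea looks like this
    (an illustration, not the mlx4 patch itself):

        #include <linux/gfp.h>
        #include <linux/mm.h>

        static struct page *alloc_rx_buf(size_t buf_size, gfp_t gfp)
        {
                /* size the order from buf_size; a hard-coded 4-page
                 * allocation wastes most of it when PAGE_SIZE is 64K */
                return alloc_pages(gfp, get_order(buf_size));
        }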

 8) SCTP socket option wasn't extended in a backwards compatible way,
    fix from Thomas Graf.
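
    A self-contained sketch of the compatibility pattern at stake (the
    my_opt_* structs are hypothetical): honor the caller's optlen so
    binaries built against the old, shorter struct keep working.

        #include <string.h>

        struct my_opt_v1 { int a; };
        struct my_opt_v2 { int a; int b; };  /* extended form */

        static int my_getsockopt(void *optval, int *optlen)
        {
                struct my_opt_v2 v = { .a = 1, .b = 2 };
                size_t copy;

                if (*optlen >= (int)sizeof(struct my_opt_v2))
                        copy = sizeof(struct my_opt_v2);
                else if (*optlen >= (int)sizeof(struct my_opt_v1))
                        copy = sizeof(struct my_opt_v1);  /* old ABI */
                else
                        return -1;  /* too small even for v1 */
                memcpy(optval, &v, copy);
                *optlen = (int)copy;
                return 0;
        }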

 9) Add missing address change event emissions to bonding, from Shlomo
    Pongratz.

10) /proc/net/dev regressed because it uses a private offset to track
    where we are in the hash table, but that offset doesn't track the
    pullback that the seq_file code performs, resulting in some
    entries being missed in large dumps.

    Fix from Eric Dumazet.
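
    The seq_file contract involved, as a sketch (entry_at_index() is a
    hypothetical lookup): recompute the cursor from *pos on every
    ->start() call, because seq_read() can pull pos back when its
    buffer fills mid-record.

        void *entry_at_index(void *table, loff_t index);  /* hypothetical */

        static void *my_seq_start(struct seq_file *seq, loff_t *pos)
        {
                /* never trust a privately cached cursor here */
                return entry_at_index(seq->private, *pos);
        }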

11) do_tcp_sendpage() unloads the send queue way too fast, because it
    invokes tcp_push() when it shouldn't.  Let the natural sequence
    generated by the splice paths, and the associated MSG_MORE
    settings, guide the tcp_push() calls.

    Otherwise what goes out of TCP is spaghetti and doesn't batch
    effectively into GSO/TSO clusters.

    From Eric Dumazet.
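
    The same batching principle is visible from userspace: flag every
    chunk but the last with MSG_MORE and TCP is free to coalesce. A
    self-contained illustration (not the kernel change itself):

        #include <sys/types.h>
        #include <sys/socket.h>

        static ssize_t send_batched(int fd, const char *buf, size_t len,
                                    size_t chunk)
        {
                size_t done = 0;

                while (done < len) {
                        size_t n = len - done < chunk ? len - done : chunk;
                        /* more coming -> let TCP batch; last one flushes */
                        int flags = (done + n < len) ? MSG_MORE : 0;
                        ssize_t r = send(fd, buf + done, n, flags);

                        if (r < 0)
                                return -1;
                        done += (size_t)r;
                }
                return (ssize_t)done;
        }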

12) Once we put a SKB into either the netlink receiver's queue or a
    socket error queue, it can be consumed and freed up; therefore we
    cannot touch it after queueing it like that.

    Fixes from Eric Dumazet.
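
    The general rule, sketched with hypothetical queue helpers (not
    the actual fixes): snapshot whatever you still need from the skb
    before queueing it.

        struct my_queue;  /* hypothetical */
        void my_enqueue(struct my_queue *q, struct sk_buff *skb);
        void my_wake_reader(struct my_queue *q, unsigned int len);

        static void queue_and_wake(struct my_queue *q, struct sk_buff *skb)
        {
                unsigned int len = skb->len;  /* read first */

                my_enqueue(q, skb);      /* skb may be freed from here on */
                my_wake_reader(q, len);  /* use the snapshot, not skb->len */
        }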

13) PPP has an annoying behavior: for every transmit call it
    immediately stops the TX queue, then calls down into the next
    layer to transmit the PPP frame.

    But if that next layer can take it immediately, it just un-stops the
    TX queue right before returning from the transmit method.

    Besides being useless work, it makes several facilities unusable, in
    particular things like the equalizers.  Well behaved devices should
    only stop the TX queue when they really are full, and in PPP's case
    when it gets backlogged to the downstream device.

    David Woodhouse therefore fixed PPP to not stop the TX queue until
    its downstream can't take any more data.
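
    Sketched against the netdev API (the my_* helpers are
    hypothetical), the well-behaved pattern matches the driver.txt
    guidance updated below:

        void my_queue_to_downstream(struct net_device *dev,
                                    struct sk_buff *skb);  /* hypothetical */
        bool my_downstream_full(struct net_device *dev);   /* hypothetical */

        static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
        {
                my_queue_to_downstream(dev, skb);
                if (my_downstream_full(dev))
                        netif_stop_queue(dev);  /* only when really full */
                return NETDEV_TX_OK;
        }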

14) IFF_UNICAST_FLT got accidentally lost in some recent stmmac driver
    changes, re-add.  From Marc Kleine-Budde.

15) Fix link flaps in ixgbe, from Eric W. Multanen.

16) Descriptor writeback fixes in e1000e from Matthew Vick.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  net: fix a race in sock_queue_err_skb()
  netlink: fix races after skb queueing
  doc, net: Update ndo_start_xmit return type and values
  doc, net: Remove instruction to set net_device::trans_start
  doc, net: Update netdev operation names
  doc, net: Update documentation of synchronisation for TX multiqueue
  doc, net: Remove obsolete reference to dev->poll
  ethtool: Remove exception to the requirement of holding RTNL lock
  MAINTAINERS: update for Marvell Ethernet drivers
  bonding: properly unset current_arp_slave on slave link up
  phonet: Check input from user before allocating
  tcp: tcp_sendpages() should call tcp_push() once
  ipv6: fix array index in ip6_mc_add_src()
  mlx4: allocate just enough pages instead of always 4 pages
  stmmac: re-add IFF_UNICAST_FLT for dwmac1000
  bnx2x: Clear MDC/MDIO warning message
  bnx2x: Fix BCM57711+BCM84823 link issue
  bnx2x: Clear BCM84833 LED after fan failure
  bnx2x: Fix BCM84833 PHY FW version presentation
  bnx2x: Fix link issue for BCM8727 boards.
  ...

38 files changed:
Documentation/networking/driver.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/netdevices.txt
MAINTAINERS
arch/x86/net/bpf_jit.S
arch/x86/net/bpf_jit_comp.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/phy/icplus.c
drivers/net/ppp/ppp_generic.c
fs/splice.c
include/linux/ethtool.h
include/linux/netdevice.h
include/linux/netfilter/xt_set.h
include/linux/socket.h
include/net/netfilter/xt_log.h
net/core/dev.c
net/core/dev_addr_lists.c
net/core/filter.c
net/core/skbuff.c
net/ipv4/tcp.c
net/ipv6/mcast.c
net/netfilter/nf_conntrack_core.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/phonet/pep.c
net/sctp/socket.c
net/socket.c

index 03283daa64fef72360667fb979a256aa10a3bb91..da59e2884130cbfc8b128b5be6b222ca10da82b8 100644
@@ -2,16 +2,16 @@ Document about softnet driver issues
 
 Transmit path guidelines:
 
-1) The hard_start_xmit method must never return '1' under any
-   normal circumstances.  It is considered a hard error unless
+1) The ndo_start_xmit method must not return NETDEV_TX_BUSY under
+   any normal circumstances.  It is considered a hard error unless
    there is no way your device can tell ahead of time when it's
    transmit function will become busy.
 
    Instead it must maintain the queue properly.  For example,
    for a driver implementing scatter-gather this means:
 
-       static int drv_hard_start_xmit(struct sk_buff *skb,
-                                      struct net_device *dev)
+       static netdev_tx_t drv_hard_start_xmit(struct sk_buff *skb,
+                                              struct net_device *dev)
        {
                struct drv *dp = netdev_priv(dev);
 
@@ -23,7 +23,7 @@ Transmit path guidelines:
                        unlock_tx(dp);
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                               dev->name);
-                       return 1;
+                       return NETDEV_TX_BUSY;
                }
 
                ... queue packet to card ...
@@ -35,6 +35,7 @@ Transmit path guidelines:
                ...
                unlock_tx(dp);
                ...
+               return NETDEV_TX_OK;
        }
 
    And then at the end of your TX reclamation event handling:
@@ -58,15 +59,12 @@ Transmit path guidelines:
             TX_BUFFS_AVAIL(dp) > 0)
                netif_wake_queue(dp->dev);
 
-2) Do not forget to update netdev->trans_start to jiffies after
-   each new tx packet is given to the hardware.
-
-3) A hard_start_xmit method must not modify the shared parts of a
+2) An ndo_start_xmit method must not modify the shared parts of a
    cloned SKB.
 
-4) Do not forget that once you return 0 from your hard_start_xmit
-   method, it is your driver's responsibility to free up the SKB
-   and in some finite amount of time.
+3) Do not forget that once you return NETDEV_TX_OK from your
+   ndo_start_xmit method, it is your driver's responsibility to free
+   up the SKB and in some finite amount of time.
 
    For example, this means that it is not allowed for your TX
    mitigation scheme to let TX packets "hang out" in the TX
@@ -74,8 +72,9 @@ Transmit path guidelines:
    This error can deadlock sockets waiting for send buffer room
    to be freed up.
 
-   If you return 1 from the hard_start_xmit method, you must not keep
-   any reference to that SKB and you must not attempt to free it up.
+   If you return NETDEV_TX_BUSY from the ndo_start_xmit method, you
+   must not keep any reference to that SKB and you must not attempt
+   to free it up.
 
 Probing guidelines:
 
@@ -85,10 +84,10 @@ Probing guidelines:
 
 Close/stop guidelines:
 
-1) After the dev->stop routine has been called, the hardware must
+1) After the ndo_stop routine has been called, the hardware must
    not receive or transmit any data.  All in flight packets must
    be aborted. If necessary, poll or wait for completion of 
    any reset commands.
 
-2) The dev->stop routine will be called by unregister_netdevice
+2) The ndo_stop routine will be called by unregister_netdevice
    if device is still UP.
index ad3e80e17b4f49a287c1d97d39e38cd8b092aa86..bd80ba5847d2b8b44548180c7cd9f912832bc4f1 100644
@@ -604,15 +604,8 @@ IP Variables:
 ip_local_port_range - 2 INTEGERS
        Defines the local port range that is used by TCP and UDP to
        choose the local port. The first number is the first, the
-       second the last local port number. Default value depends on
-       amount of memory available on the system:
-       > 128Mb 32768-61000
-       < 128Mb 1024-4999 or even less.
-       This number defines number of active connections, which this
-       system can issue simultaneously to systems not supporting
-       TCP extensions (timestamps). With tcp_tw_recycle enabled
-       (i.e. by default) range 1024-4999 is enough to issue up to
-       2000 connections per second to systems supporting timestamps.
+       second the last local port number. The default values are
+       32768 and 61000 respectively.
 
 ip_local_reserved_ports - list of comma separated ranges
        Specify the ports which are reserved for known third-party
index 89358341682a1290879d26b854c1527991b54768..c7ecc7080494da43fc469eac5b62b06905c13559 100644
@@ -47,26 +47,25 @@ packets is preferred.
 
 struct net_device synchronization rules
 =======================================
-dev->open:
+ndo_open:
        Synchronization: rtnl_lock() semaphore.
        Context: process
 
-dev->stop:
+ndo_stop:
        Synchronization: rtnl_lock() semaphore.
        Context: process
-       Note1: netif_running() is guaranteed false
-       Note2: dev->poll() is guaranteed to be stopped
+       Note: netif_running() is guaranteed false
 
-dev->do_ioctl:
+ndo_do_ioctl:
        Synchronization: rtnl_lock() semaphore.
        Context: process
 
-dev->get_stats:
+ndo_get_stats:
        Synchronization: dev_base_lock rwlock.
        Context: nominally process, but don't sleep inside an rwlock
 
-dev->hard_start_xmit:
-       Synchronization: netif_tx_lock spinlock.
+ndo_start_xmit:
+       Synchronization: __netif_tx_lock spinlock.
 
        When the driver sets NETIF_F_LLTX in dev->features this will be
        called without holding netif_tx_lock. In this case the driver
@@ -87,20 +86,20 @@ dev->hard_start_xmit:
        o NETDEV_TX_LOCKED Locking failed, please retry quickly.
          Only valid when NETIF_F_LLTX is set.
 
-dev->tx_timeout:
-       Synchronization: netif_tx_lock spinlock.
+ndo_tx_timeout:
+       Synchronization: netif_tx_lock spinlock; all TX queues frozen.
        Context: BHs disabled
        Notes: netif_queue_stopped() is guaranteed true
 
-dev->set_rx_mode:
-       Synchronization: netif_tx_lock spinlock.
+ndo_set_rx_mode:
+       Synchronization: netif_addr_lock spinlock.
        Context: BHs disabled
 
 struct napi_struct synchronization rules
 ========================================
 napi->poll:
        Synchronization: NAPI_STATE_SCHED bit in napi->state.  Device
-               driver's dev->close method will invoke napi_disable() on
+               driver's ndo_stop method will invoke napi_disable() on
                all NAPI instances which will do a sleeping poll on the
                NAPI_STATE_SCHED napi->state bit, waiting for all pending
                NAPI activity to cease.
index 3e25ba82e0fa578ce0dfbbb6d6f8f63bb083b29c..6d05ae23603679310ba6f0a685e0308c1148d47a 100644
@@ -4309,6 +4309,13 @@ W:       http://www.kernel.org/doc/man-pages
 L:     linux-man@vger.kernel.org
 S:     Maintained
 
+MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
+M:     Mirko Lindner <mlindner@marvell.com>
+M:     Stephen Hemminger <shemminger@vyatta.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/marvell/sk*
+
 MARVELL LIBERTAS WIRELESS DRIVER
 M:     Dan Williams <dcbw@redhat.com>
 L:     libertas-dev@lists.infradead.org
@@ -4339,12 +4346,6 @@ M:       Nicolas Pitre <nico@fluxnic.net>
 S:     Odd Fixes
 F:     drivers/mmc/host/mvsdio.*
 
-MARVELL YUKON / SYSKONNECT DRIVER
-M:     Mirko Lindner <mlindner@syskonnect.de>
-M:     Ralph Roesler <rroesler@syskonnect.de>
-W:     http://www.syskonnect.com
-S:     Supported
-
 MATROX FRAMEBUFFER DRIVER
 L:     linux-fbdev@vger.kernel.org
 S:     Orphan
@@ -6116,12 +6117,6 @@ W:       http://www.winischhofer.at/linuxsisusbvga.shtml
 S:     Maintained
 F:     drivers/usb/misc/sisusbvga/
 
-SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-M:     Stephen Hemminger <shemminger@vyatta.com>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/marvell/sk*
-
 SLAB ALLOCATOR
 M:     Christoph Lameter <cl@linux-foundation.org>
 M:     Pekka Enberg <penberg@kernel.org>
index 66870223f8c5e001e7fc5bbe02b659c8778def86..877b9a1b21523183d06973b60a9beb2459fff895 100644
  * r9d : hlen = skb->len - skb->data_len
  */
 #define SKBDATA        %r8
-
-sk_load_word_ind:
-       .globl  sk_load_word_ind
-
-       add     %ebx,%esi       /* offset += X */
-#      test    %esi,%esi       /* if (offset < 0) goto bpf_error; */
-       js      bpf_error
+#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
 
 sk_load_word:
        .globl  sk_load_word
 
+       test    %esi,%esi
+       js      bpf_slow_path_word_neg
+
+sk_load_word_positive_offset:
+       .globl  sk_load_word_positive_offset
+
        mov     %r9d,%eax               # hlen
        sub     %esi,%eax               # hlen - offset
        cmp     $3,%eax
@@ -37,16 +37,15 @@ sk_load_word:
        bswap   %eax                    /* ntohl() */
        ret
 
-
-sk_load_half_ind:
-       .globl sk_load_half_ind
-
-       add     %ebx,%esi       /* offset += X */
-       js      bpf_error
-
 sk_load_half:
        .globl  sk_load_half
 
+       test    %esi,%esi
+       js      bpf_slow_path_half_neg
+
+sk_load_half_positive_offset:
+       .globl  sk_load_half_positive_offset
+
        mov     %r9d,%eax
        sub     %esi,%eax               #       hlen - offset
        cmp     $1,%eax
@@ -55,14 +54,15 @@ sk_load_half:
        rol     $8,%ax                  # ntohs()
        ret
 
-sk_load_byte_ind:
-       .globl sk_load_byte_ind
-       add     %ebx,%esi       /* offset += X */
-       js      bpf_error
-
 sk_load_byte:
        .globl  sk_load_byte
 
+       test    %esi,%esi
+       js      bpf_slow_path_byte_neg
+
+sk_load_byte_positive_offset:
+       .globl  sk_load_byte_positive_offset
+
        cmp     %esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
        jle     bpf_slow_path_byte
        movzbl  (SKBDATA,%rsi),%eax
@@ -73,25 +73,21 @@ sk_load_byte:
  *
  * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
  * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value, already known positive
+ * Inputs : %esi is the offset value
  */
-ENTRY(sk_load_byte_msh)
-       CFI_STARTPROC
+sk_load_byte_msh:
+       .globl  sk_load_byte_msh
+       test    %esi,%esi
+       js      bpf_slow_path_byte_msh_neg
+
+sk_load_byte_msh_positive_offset:
+       .globl  sk_load_byte_msh_positive_offset
        cmp     %esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
        jle     bpf_slow_path_byte_msh
        movzbl  (SKBDATA,%rsi),%ebx
        and     $15,%bl
        shl     $2,%bl
        ret
-       CFI_ENDPROC
-ENDPROC(sk_load_byte_msh)
-
-bpf_error:
-# force a return 0 from jit handler
-       xor             %eax,%eax
-       mov             -8(%rbp),%rbx
-       leaveq
-       ret
 
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)              \
@@ -138,3 +134,67 @@ bpf_slow_path_byte_msh:
        shl     $2,%al
        xchg    %eax,%ebx
        ret
+
+#define sk_negative_common(SIZE)                               \
+       push    %rdi;   /* save skb */                          \
+       push    %r9;                                            \
+       push    SKBDATA;                                        \
+/* rsi already has offset */                                   \
+       mov     $SIZE,%ecx;     /* size */                      \
+       call    bpf_internal_load_pointer_neg_helper;           \
+       test    %rax,%rax;                                      \
+       pop     SKBDATA;                                        \
+       pop     %r9;                                            \
+       pop     %rdi;                                           \
+       jz      bpf_error
+
+
+bpf_slow_path_word_neg:
+       cmp     SKF_MAX_NEG_OFF, %esi   /* test range */
+       jl      bpf_error       /* offset lower -> error  */
+sk_load_word_negative_offset:
+       .globl  sk_load_word_negative_offset
+       sk_negative_common(4)
+       mov     (%rax), %eax
+       bswap   %eax
+       ret
+
+bpf_slow_path_half_neg:
+       cmp     SKF_MAX_NEG_OFF, %esi
+       jl      bpf_error
+sk_load_half_negative_offset:
+       .globl  sk_load_half_negative_offset
+       sk_negative_common(2)
+       mov     (%rax),%ax
+       rol     $8,%ax
+       movzwl  %ax,%eax
+       ret
+
+bpf_slow_path_byte_neg:
+       cmp     SKF_MAX_NEG_OFF, %esi
+       jl      bpf_error
+sk_load_byte_negative_offset:
+       .globl  sk_load_byte_negative_offset
+       sk_negative_common(1)
+       movzbl  (%rax), %eax
+       ret
+
+bpf_slow_path_byte_msh_neg:
+       cmp     SKF_MAX_NEG_OFF, %esi
+       jl      bpf_error
+sk_load_byte_msh_negative_offset:
+       .globl  sk_load_byte_msh_negative_offset
+       xchg    %eax,%ebx /* don't lose A; X is about to be scratched */
+       sk_negative_common(1)
+       movzbl  (%rax),%eax
+       and     $15,%al
+       shl     $2,%al
+       xchg    %eax,%ebx
+       ret
+
+bpf_error:
+# force a return 0 from jit handler
+       xor             %eax,%eax
+       mov             -8(%rbp),%rbx
+       leaveq
+       ret
index 5a5b6e4dd7386586b5a9e8d5559b6b104f48d447..0597f95b6da663af5a43d5f5effd0bdf0447fe4f 100644
@@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly;
  * assembly code in arch/x86/net/bpf_jit.S
  */
 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
-extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
+extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end)
        set_fs(old_fs);
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+       ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 void bpf_jit_compile(struct sk_filter *fp)
 {
@@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
                                break;
                        case BPF_S_LD_W_ABS:
-                               func = sk_load_word;
+                               func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 common_load:                   seen |= SEEN_DATAREF;
-                               if ((int)K < 0) {
-                                       /* Abort the JIT because __load_pointer() is needed. */
-                                       goto out;
-                               }
                                t_offset = func - (image + addrs[i]);
                                EMIT1_off32(0xbe, K); /* mov imm32,%esi */
                                EMIT1_off32(0xe8, t_offset); /* call */
                                break;
                        case BPF_S_LD_H_ABS:
-                               func = sk_load_half;
+                               func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                                goto common_load;
                        case BPF_S_LD_B_ABS:
-                               func = sk_load_byte;
+                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                                goto common_load;
                        case BPF_S_LDX_B_MSH:
-                               if ((int)K < 0) {
-                                       /* Abort the JIT because __load_pointer() is needed. */
-                                       goto out;
-                               }
+                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                                seen |= SEEN_DATAREF | SEEN_XREG;
-                               t_offset = sk_load_byte_msh - (image + addrs[i]);
+                               t_offset = func - (image + addrs[i]);
                                EMIT1_off32(0xbe, K);   /* mov imm32,%esi */
                                EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
                                break;
                        case BPF_S_LD_W_IND:
-                               func = sk_load_word_ind;
+                               func = sk_load_word;
 common_load_ind:               seen |= SEEN_DATAREF | SEEN_XREG;
                                t_offset = func - (image + addrs[i]);
-                               EMIT1_off32(0xbe, K);   /* mov imm32,%esi   */
+                               if (K) {
+                                       if (is_imm8(K)) {
+                                               EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
+                                       } else {
+                                               EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
+                                               EMIT(K, 4);
+                                       }
+                               } else {
+                                       EMIT2(0x89,0xde); /* mov %ebx,%esi */
+                               }
                                EMIT1_off32(0xe8, t_offset);    /* call sk_load_xxx_ind */
                                break;
                        case BPF_S_LD_H_IND:
-                               func = sk_load_half_ind;
+                               func = sk_load_half;
                                goto common_load_ind;
                        case BPF_S_LD_B_IND:
-                               func = sk_load_byte_ind;
+                               func = sk_load_byte;
                                goto common_load_ind;
                        case BPF_S_JMP_JA:
                                t_offset = addrs[i + K] - addrs[i];
index 941b4e189adf67391ac06c8effefa820fbd0d5cb..62d2409bb293c54de4173e8ef9452e35b6f172e2 100644
@@ -2034,6 +2034,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        write_unlock_bh(&bond->lock);
        unblock_netpoll_tx();
 
+       if (bond->slave_cnt == 0)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+
        bond_compute_features(bond);
        if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
            (old_features & NETIF_F_VLAN_CHALLENGED))
@@ -3007,7 +3010,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                                           trans_start + delta_in_ticks)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
-                               bond->current_arp_slave = NULL;
+                               if (bond->current_arp_slave) {
+                                       bond_set_slave_inactive_flags(
+                                               bond->current_arp_slave);
+                                       bond->current_arp_slave = NULL;
+                               }
 
                                pr_info("%s: link status definitely up for interface %s.\n",
                                        bond->dev->name, slave->dev->name);
@@ -3701,17 +3708,52 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
        read_unlock(&bond->lock);
 }
 
-static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
+static int bond_neigh_init(struct neighbour *n)
 {
-       struct bonding *bond = netdev_priv(dev);
+       struct bonding *bond = netdev_priv(n->dev);
        struct slave *slave = bond->first_slave;
+       const struct net_device_ops *slave_ops;
+       struct neigh_parms parms;
+       int ret;
+
+       if (!slave)
+               return 0;
+
+       slave_ops = slave->dev->netdev_ops;
+
+       if (!slave_ops->ndo_neigh_setup)
+               return 0;
+
+       parms.neigh_setup = NULL;
+       parms.neigh_cleanup = NULL;
+       ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+       if (ret)
+               return ret;
+
+       /*
+        * Assign slave's neigh_cleanup to neighbour in case cleanup is called
+        * after the last slave has been detached.  Assumes that all slaves
+        * utilize the same neigh_cleanup (true at this writing as only user
+        * is ipoib).
+        */
+       n->parms->neigh_cleanup = parms.neigh_cleanup;
+
+       if (!parms.neigh_setup)
+               return 0;
+
+       return parms.neigh_setup(n);
+}
+
+/*
+ * The bonding ndo_neigh_setup is called at init time before any
+ * slave exists. So we must declare a proxy setup function which will
+ * be used at run time to resolve the actual slave neigh param setup.
+ */
+static int bond_neigh_setup(struct net_device *dev,
+                           struct neigh_parms *parms)
+{
+       parms->neigh_setup   = bond_neigh_init;
 
-       if (slave) {
-               const struct net_device_ops *slave_ops
-                       = slave->dev->netdev_ops;
-               if (slave_ops->ndo_neigh_setup)
-                       return slave_ops->ndo_neigh_setup(slave->dev, parms);
-       }
        return 0;
 }
 
index 44556b719e813ef164848d343b7bec97bb989b5d..4b054812713a7b3f9968d4b82310c171a6f4fff6 100644
@@ -1874,7 +1874,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                 * bnx2x_periodic_task().
                 */
                smp_mb();
-               queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
        } else
                bp->port.pmf = 0;
 
index cd6dfa9eaa3aa24c122316c9e4d052ca93d6c47d..b9b2633234363485d7f86980ee4ed3e7abac0e09 100644
        (IRO[149].base + ((funcId) * IRO[149].m1))
 #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-       (IRO[315].base + ((pfId) * IRO[315].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
        (IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+       (IRO[317].base + ((pfId) * IRO[317].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-       (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+       (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-       (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+       (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-       (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+       (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-       (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+       (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-       (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+       (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-       (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+       (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-       (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+       (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-       (IRO[314].base + ((pfId) * IRO[314].m1))
+       (IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-       (IRO[306].base + ((pfId) * IRO[306].m1))
+       (IRO[307].base + ((pfId) * IRO[307].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-       (IRO[305].base + ((pfId) * IRO[305].m1))
+       (IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-       (IRO[304].base + ((pfId) * IRO[304].m1))
+       (IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
        (IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
        (IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-       (IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
        (IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
        (IRO[273].base + ((pfId) * IRO[273].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
        (IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+       (IRO[275].base + ((pfId) * IRO[275].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-       (IRO[270].base + ((pfId) * IRO[270].m1))
+       (IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-       (IRO[269].base + ((pfId) * IRO[269].m1))
+       (IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-       (IRO[268].base + ((pfId) * IRO[268].m1))
+       (IRO[269].base + ((pfId) * IRO[269].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-       (IRO[267].base + ((pfId) * IRO[267].m1))
+       (IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-       (IRO[276].base + ((pfId) * IRO[276].m1))
+       (IRO[277].base + ((pfId) * IRO[277].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-       (IRO[263].base + ((pfId) * IRO[263].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
        (IRO[264].base + ((pfId) * IRO[264].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
        (IRO[265].base + ((pfId) * IRO[265].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
        (IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+       (IRO[267].base + ((pfId) * IRO[267].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
        (IRO[202].base + ((pfId) * IRO[202].m1))
 #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
        (IRO[105].base + ((funcId) * IRO[105].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-       (IRO[216].base + ((pfId) * IRO[216].m1))
+       (IRO[217].base + ((pfId) * IRO[217].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
        (IRO[104].base + ((funcId) * IRO[104].m1))
 #define USTORM_AGG_DATA_OFFSET (IRO[206].base)
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
        (IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-       (IRO[317].base + ((pfId) * IRO[317].m1))
+       (IRO[318].base + ((pfId) * IRO[318].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
        (IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-       (IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
        (IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+       (IRO[283].base + ((pfId) * IRO[283].m1))
 #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-       (IRO[286].base + ((pfId) * IRO[286].m1))
+       (IRO[287].base + ((pfId) * IRO[287].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-       (IRO[283].base + ((pfId) * IRO[283].m1))
+       (IRO[284].base + ((pfId) * IRO[284].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-       (IRO[279].base + ((pfId) * IRO[279].m1))
+       (IRO[280].base + ((pfId) * IRO[280].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-       (IRO[278].base + ((pfId) * IRO[278].m1))
+       (IRO[279].base + ((pfId) * IRO[279].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-       (IRO[277].base + ((pfId) * IRO[277].m1))
+       (IRO[278].base + ((pfId) * IRO[278].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-       (IRO[280].base + ((pfId) * IRO[280].m1))
+       (IRO[281].base + ((pfId) * IRO[281].m1))
 #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-       (IRO[284].base + ((pfId) * IRO[284].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
        (IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+       (IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
        (IRO[182].base + ((pfId) * IRO[182].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
        (IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-       (IRO[294].base + ((pfId) * IRO[294].m1))
+       (IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-       (IRO[297].base + ((pfId) * IRO[297].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
        (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
        (IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
        (IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
        (IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
        (IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
        (IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+       (IRO[304].base + ((pfId) * IRO[304].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-       (IRO[293].base + ((pfId) * IRO[293].m1))
+       (IRO[294].base + ((pfId) * IRO[294].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-       (IRO[292].base + ((pfId) * IRO[292].m1))
+       (IRO[293].base + ((pfId) * IRO[293].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-       (IRO[291].base + ((pfId) * IRO[291].m1))
+       (IRO[292].base + ((pfId) * IRO[292].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-       (IRO[296].base + ((pfId) * IRO[296].m1))
+       (IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-       (IRO[295].base + ((pfId) * IRO[295].m1))
+       (IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-       (IRO[290].base + ((pfId) * IRO[290].m1))
+       (IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-       (IRO[289].base + ((pfId) * IRO[289].m1))
+       (IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-       (IRO[288].base + ((pfId) * IRO[288].m1))
+       (IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-       (IRO[287].base + ((pfId) * IRO[287].m1))
+       (IRO[288].base + ((pfId) * IRO[288].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
        (IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
index efa557b76ac75e60235bf5fac8c828ef0588cbff..ad95324dc0420681c21e12ead836eafdceb35a59 100644
@@ -1371,7 +1371,14 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
                pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
                        XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
                        XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
-                       XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+                       XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+                       XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+               /* Write pause and PFC registers */
+               REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+               REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+               REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+               pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
        }
 
        /* Write pause and PFC registers */
@@ -3648,6 +3655,33 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
                bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
                bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+       } else if (CHIP_IS_E3(bp) &&
+               SINGLE_MEDIA_DIRECT(params)) {
+               u8 lane = bnx2x_get_warpcore_lane(phy, params);
+               u16 gp_status, gp_mask;
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+                               &gp_status);
+               gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+                          MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+                       lane;
+               if ((gp_status & gp_mask) == gp_mask) {
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+               } else {
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+                       ld_pause = ((ld_pause &
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+                                   << 3);
+                       lp_pause = ((lp_pause &
+                                    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+                                   << 3);
+               }
        } else {
                bnx2x_cl45_read(bp, phy,
                                MDIO_AN_DEVAD,
@@ -3698,7 +3732,23 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        u16 val16 = 0, lane, bam37 = 0;
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
-
+       /* Set to default registers that may be overridden by 10G force */
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                        MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                       MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                       MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+                        MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_RX66_CONTROL, 0x7415);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
        /* Disable Autoneg: re-enable it after adv is done. */
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
                         MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
@@ -3944,13 +3994,13 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 
        } else {
                misc1_val |= 0x9;
-               tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-                          (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-                          (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+               tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+                          (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+                          (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
                tx_driver_val =
-                     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+                     ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
                       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-                      (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+                      (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
        }
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4368,7 +4418,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                switch (serdes_net_if) {
                case PORT_HW_CFG_NET_SERDES_IF_KR:
                        /* Enable KR Auto Neg */
-                       if (params->loopback_mode == LOOPBACK_NONE)
+                       if (params->loopback_mode != LOOPBACK_EXT)
                                bnx2x_warpcore_enable_AN_KR(phy, params, vars);
                        else {
                                DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
@@ -6166,12 +6216,14 @@ int bnx2x_set_led(struct link_params *params,
 
                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
                if (params->phy[EXT_PHY1].type ==
-                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
-                       EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1);
-               else {
-                       EMAC_WR(bp, EMAC_REG_EMAC_LED,
-                               (tmp | EMAC_LED_OVERRIDE));
-               }
+                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+                       tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+                               EMAC_LED_100MB_OVERRIDE |
+                               EMAC_LED_10MB_OVERRIDE);
+               else
+                       tmp |= EMAC_LED_OVERRIDE;
+
+               EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
                break;
 
        case LED_MODE_OPER:
@@ -6226,10 +6278,15 @@ int bnx2x_set_led(struct link_params *params,
                                       hw_led_mode);
                } else if ((params->phy[EXT_PHY1].type ==
                            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-                          (mode != LED_MODE_OPER)) {
+                          (mode == LED_MODE_ON)) {
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
                        tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-                       EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3);
+                       EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+                               EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+                       /* Break here; otherwise, it'll disable the
+                        * intended override.
+                        */
+                       break;
                } else
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
                               hw_led_mode);
@@ -6244,13 +6301,9 @@ int bnx2x_set_led(struct link_params *params,
                               LED_BLINK_RATE_VAL_E1X_E2);
                REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
                       port*4, 1);
-               if ((params->phy[EXT_PHY1].type !=
-                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
-                   (mode != LED_MODE_OPER)) {
-                       tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-                       EMAC_WR(bp, EMAC_REG_EMAC_LED,
-                               (tmp & (~EMAC_LED_OVERRIDE)));
-               }
+               tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+               EMAC_WR(bp, EMAC_REG_EMAC_LED,
+                       (tmp & (~EMAC_LED_OVERRIDE)));
 
                if (CHIP_IS_E1(bp) &&
                    ((speed == SPEED_2500) ||
@@ -6843,6 +6896,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                          SINGLE_MEDIA_DIRECT(params)) &&
                         (phy_vars[active_external_phy].fault_detected == 0));
 
+       /* Update the PFC configuration in case it was changed */
+       if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+               vars->link_status |= LINK_STATUS_PFC_ENABLED;
+       else
+               vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
        if (vars->link_up)
                rc = bnx2x_update_link_up(params, vars, link_10g_plus);
        else
@@ -8030,7 +8089,9 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
        netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
                              " Port %d from %s part number %s\n",
                         params->port, vendor_name, vendor_pn);
-       phy->flags |= FLAGS_SFP_NOT_APPROVED;
+       if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+           PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+               phy->flags |= FLAGS_SFP_NOT_APPROVED;
        return -EINVAL;
 }
 
@@ -9090,6 +9151,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                tmp2 &= 0xFFEF;
                bnx2x_cl45_write(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+                               &tmp2);
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+                                (tmp2 & 0x7fff));
        }
 
        return 0;
@@ -9270,12 +9337,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
                                 ((1<<5) | (1<<2)));
        }
-       DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
-       bnx2x_8727_specific_func(phy, params, ENABLE_TX);
-       /* If transmitter is disabled, ignore false link up indication */
-       bnx2x_cl45_read(bp, phy,
-                       MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
-       if (val1 & (1<<15)) {
+
+       if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+               DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+               bnx2x_sfp_set_transmitter(params, phy, 1);
+       } else {
                DP(NETIF_MSG_LINK, "Tx is disabled\n");
                return 0;
        }
@@ -9369,8 +9435,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
-               bnx2x_save_spirom_version(bp, port,
-                               ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+               bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
                                phy->ver_addr);
        } else {
                /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
@@ -9793,6 +9858,15 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
                                other_shmem_base_addr));
 
        u32 shmem_base_path[2];
+
+       /* Work around for 84833 LED failure inside RESET status */
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+               MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+               MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+               MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+               MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
        shmem_base_path[0] = params->shmem_base;
        shmem_base_path[1] = other_shmem_base_addr;
 
@@ -10103,7 +10177,7 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
        u8 port;
        u16 val16;
 
-       if (!(CHIP_IS_E1(bp)))
+       if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
        else
                port = params->port;
@@ -10130,7 +10204,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
        u16 val;
        u8 port;
 
-       if (!(CHIP_IS_E1(bp)))
+       if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
        else
                port = params->port;
@@ -12049,6 +12123,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 
        bnx2x_emac_init(params, vars);
 
+       if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+               vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
        if (params->num_phys == 0) {
                DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
                return -EINVAL;
@@ -12128,10 +12205,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
         * Hold it as vars low
         */
         /* clear link led */
+       bnx2x_set_mdio_clk(bp, params->chip_id, port);
        bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
        if (reset_ext_phy) {
-               bnx2x_set_mdio_clk(bp, params->chip_id, port);
                for (phy_index = EXT_PHY1; phy_index < params->num_phys;
                      phy_index++) {
                        if (params->phy[phy_index].link_reset) {
index ab0a250f95fa14ea0107e7fcde58c0d3c33f8612..c25803b9c0ca3e3e91ecb69cc53f92124e0101c7 100644
 #define XMAC_CTRL_REG_TX_EN                                     (0x1<<0)
 #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN                                 (0x1<<18)
 #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN                                 (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON                      (0x1<<1)
 #define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN                     (0x1<<0)
 #define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN                       (0x1<<3)
 #define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN                          (0x1<<4)
@@ -6820,10 +6821,13 @@ The other bits are reserved and should be zero */
 
 #define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL     0x0020
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL       0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G     0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS     0xffe1
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV         0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION   0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL            0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL      0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF       0x0008
 #define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW   0xfff5
 #define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS  0xfff7
 #define MDIO_AN_REG_8481_AUX_CTRL              0xfff8
@@ -6943,6 +6947,10 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_GP2_STATUS_GP_2_2                  0x81d2
 #define MDIO_WC_REG_GP2_STATUS_GP_2_3                  0x81d3
 #define MDIO_WC_REG_GP2_STATUS_GP_2_4                  0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
 #define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP               0x81EE
 #define MDIO_WC_REG_UC_INFO_B1_VERSION                 0x81F0
 #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE           0x81F2
index 86cdd47939925b64ebe5f3d1859165d6f1880f30..b83897f76ee34786360261d6148c31bcf9563588 100644
@@ -161,6 +161,12 @@ struct e1000_info;
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
 #define LINK_TIMEOUT           100
 
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ * Experimentation has shown the reset can take approximately 210msec.
+ */
+#define E1000_CHECK_RESET_COUNT                25
+
 #define DEFAULT_RDTR                   0
 #define DEFAULT_RADV                   8
 #define BURST_RDTR                     0x20
index 2c38a65ade8753df06caf24639f204e5eb90c2fc..19ab2154802c171fcadaeceb82419681b51c90f2 100644
@@ -1059,6 +1059,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
                /* execute the writes immediately */
                e1e_flush();
+               /*
+                * Due to rare timing issues, write to TIDV again to ensure
+                * the write is successful
+                */
+               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+               /* execute the writes immediately */
+               e1e_flush();
                adapter->tx_hang_recheck = true;
                return;
        }
@@ -3616,6 +3623,16 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
 
        /* execute the writes immediately */
        e1e_flush();
+
+       /*
+        * due to rare timing issues, write to TIDV/RDTR again to ensure the
+        * write is successful
+        */
+       ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+       ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+       /* execute the writes immediately */
+       e1e_flush();
 }
 
 static void e1000e_update_stats(struct e1000_adapter *adapter);
@@ -3968,6 +3985,10 @@ static int e1000_close(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = adapter->pdev;
+       int count = E1000_CHECK_RESET_COUNT;
+
+       while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+               usleep_range(10000, 20000);
 
        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 
@@ -5472,6 +5493,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
        netif_device_detach(netdev);
 
        if (netif_running(netdev)) {
+               int count = E1000_CHECK_RESET_COUNT;
+
+               while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+                       usleep_range(10000, 20000);
+
                WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
                e1000e_down(adapter);
                e1000_free_irq(adapter);
index dde65f951400dc7efc1c6b3d09d5c8e44710db6f..652e4b09546db699eb4ff76189f8750bd163ca9e 100644
 #define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
 #define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                       struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
+int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
+                      struct ixgbe_dcb_config *dcfg, int tc_max)
 {
-       struct tc_configuration *src_tc_cfg = NULL;
-       struct tc_configuration *dst_tc_cfg = NULL;
-       int i;
+       struct tc_configuration *src = NULL;
+       struct tc_configuration *dst = NULL;
+       int i, j;
+       int tx = DCB_TX_CONFIG;
+       int rx = DCB_RX_CONFIG;
+       int changes = 0;
 
-       if (!src_dcb_cfg || !dst_dcb_cfg)
-               return -EINVAL;
+       if (!scfg || !dcfg)
+               return changes;
 
        for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
-               src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
-               dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+               src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
+               dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
+               if (dst->path[tx].prio_type != src->path[tx].prio_type) {
+                       dst->path[tx].prio_type = src->path[tx].prio_type;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
+               if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
+                       dst->path[tx].bwg_id = src->path[tx].bwg_id;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
+               if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
+                       dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
+                       changes |= BIT_PG_TX;
+               }
 
-               dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
+               if (dst->path[tx].up_to_tc_bitmap !=
+                               src->path[tx].up_to_tc_bitmap) {
+                       dst->path[tx].up_to_tc_bitmap =
+                               src->path[tx].up_to_tc_bitmap;
+                       changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
-                               src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
+               if (dst->path[rx].prio_type != src->path[rx].prio_type) {
+                       dst->path[rx].prio_type = src->path[rx].prio_type;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
+               if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
+                       dst->path[rx].bwg_id = src->path[rx].bwg_id;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
-                               src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
+               if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
+                       dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
+                       changes |= BIT_PG_RX;
+               }
 
-               dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
-                               src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
+               if (dst->path[rx].up_to_tc_bitmap !=
+                               src->path[rx].up_to_tc_bitmap) {
+                       dst->path[rx].up_to_tc_bitmap =
+                               src->path[rx].up_to_tc_bitmap;
+                       changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
+               }
        }
 
        for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
-               dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
-               dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
-                       [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
-                               [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+               j = i - DCB_PG_ATTR_BW_ID_0;
+               if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
+                       dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
+                       changes |= BIT_PG_TX;
+               }
+               if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
+                       dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
+                       changes |= BIT_PG_RX;
+               }
        }
 
        for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
-               dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
-                       src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+               j = i - DCB_PFC_UP_ATTR_0;
+               if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) {
+                       dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc;
+                       changes |= BIT_PFC;
+               }
        }
 
-       dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+       if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
+               dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
+               changes |= BIT_PFC;
+       }
 
-       return 0;
+       return changes;
 }
 
 static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
@@ -179,20 +211,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
        if (up_map != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
                        up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
-               adapter->dcb_set_bitmap |= BIT_PG_TX;
-
-       if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -201,10 +219,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[0][bwg_id])
-               adapter->dcb_set_bitmap |= BIT_PG_TX;
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -223,20 +237,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
        if (up_map != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
                        up_map;
-
-       if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
-            adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
-            adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
-           (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
-
-       if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -245,10 +245,6 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
-
-       if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[1][bwg_id])
-               adapter->dcb_set_bitmap |= BIT_PG_RX;
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -298,10 +294,8 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 
        adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
        if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-           adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
-               adapter->dcb_set_bitmap |= BIT_PFC;
+           adapter->dcb_cfg.tc_config[priority].dcb_pfc)
                adapter->temp_dcb_cfg.pfc_mode_enable = true;
-       }
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -336,7 +330,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int ret, i;
+       int ret = DCB_NO_HW_CHG;
+       int i;
 #ifdef IXGBE_FCOE
        struct dcb_app app = {
                              .selector = DCB_APP_IDTYPE_ETHTYPE,
@@ -355,12 +350,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 
        /* Fail command if not in CEE mode */
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return 1;
+               return ret;
 
-       ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-                                MAX_TRAFFIC_CLASS);
-       if (ret)
-               return DCB_NO_HW_CHG;
+       adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
+                                                     &adapter->dcb_cfg,
+                                                     MAX_TRAFFIC_CLASS);
+       if (!adapter->dcb_set_bitmap)
+               return ret;
 
        if (adapter->dcb_cfg.pfc_mode_enable) {
                switch (adapter->hw.mac.type) {
@@ -420,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+
+               ret = DCB_HW_CHG_RST;
        }
 
        if (adapter->dcb_set_bitmap & BIT_PFC) {
@@ -430,7 +428,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                                     DCB_TX_CONFIG, prio_tc);
                ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
                ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-               ret = DCB_HW_CHG;
+               if (ret != DCB_HW_CHG_RST)
+                       ret = DCB_HW_CHG;
        }
 
        if (adapter->dcb_cfg.pfc_mode_enable)
@@ -531,9 +530,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->temp_dcb_cfg.pfc_mode_enable = state;
-       if (adapter->temp_dcb_cfg.pfc_mode_enable !=
-               adapter->dcb_cfg.pfc_mode_enable)
-               adapter->dcb_set_bitmap |= BIT_PFC;
 }
 
 /**
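
The rewritten ixgbe_copy_dcb_cfg() is the core of this change: the per-setter bookkeeping deleted below it moves into the copy itself, which compares each field as it copies and returns a bitmap of what actually changed. A reduced sketch of the compare-copy-accumulate idiom, with hypothetical struct and field names:

    /* Copy src into dst, reporting which feature groups changed. */
    static int copy_cfg_tracked(const struct cfg *src, struct cfg *dst)
    {
            int changes = 0;

            if (dst->tx_bw != src->tx_bw) {
                    dst->tx_bw = src->tx_bw;
                    changes |= BIT_PG_TX;   /* flag only real changes */
            }
            if (dst->pfc_en != src->pfc_en) {
                    dst->pfc_en = src->pfc_en;
                    changes |= BIT_PFC;
            }
            return changes;         /* 0: nothing to push to hardware */
    }

In ixgbe_dcbnl_set_all() the result is ORed into dcb_set_bitmap, and an empty bitmap now short-circuits to DCB_NO_HW_CHG.
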
index b806d9b4defb0426bde077c07376db82adde8b36..c9b504e2dfc3bfdc4707dd0f3db240b1f365ec0c 100644
@@ -2469,6 +2469,17 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
        return err;
 }
 
+static inline bool needs_copy(const struct rx_ring_info *re,
+                             unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       /* Some architectures need the IP header to be aligned */
+       if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+               return true;
+#endif
+       return length < copybreak;
+}
+
 /* For small just reuse existing skb for next receive */
 static struct sk_buff *receive_copy(struct sky2_port *sky2,
                                    const struct rx_ring_info *re,
@@ -2599,7 +2610,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
                goto error;
 
 okay:
-       if (length < copybreak)
+       if (needs_copy(re, length))
                skb = receive_copy(sky2, re, length);
        else
                skb = receive_new(sky2, re, length);
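
needs_copy() extends the plain copybreak test: on architectures without efficient unaligned access, the frame is also copied whenever the IP header would land misaligned, because ETH_HLEN is 14 and leaves the network header 2 bytes past a 4-byte boundary. A worked instance of the check:

    /* IS_ALIGNED(x, a) is ((x) & ((a) - 1)) == 0.  With re->data_addr
     * at 0x1000:  0x1000 + 14 = 0x100e,  0x100e & 0x3 = 0x2 -> not
     * aligned, so receive_copy() runs even for frames >= copybreak. */
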
index 9e2b911a12304ee88f5934c1c895d394d662e05a..d69fee41f24aa296106063f06826a06096b07f42 100644
@@ -83,8 +83,9 @@
 
 #define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
 
-#define MLX4_EN_ALLOC_ORDER    2
-#define MLX4_EN_ALLOC_SIZE     (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+/* Use the larger of 16384 and a single page */
+#define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(16384)
+#define MLX4_EN_ALLOC_ORDER    get_order(MLX4_EN_ALLOC_SIZE)
 
 #define MLX4_EN_MAX_LRO_DESCRIPTORS    32
 
index 7b23554f80b6c0f8540bccddea0c9ae31c75f35e..f54509377efad8354345176fdb9c1951a9fdd4de 100644
@@ -5810,7 +5810,10 @@ static void __rtl8169_resume(struct net_device *dev)
 
        rtl_pll_power_up(tp);
 
+       rtl_lock_work(tp);
+       napi_enable(&tp->napi);
        set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       rtl_unlock_work(tp);
 
        rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
index e85ffbd548302994514ec1e46e8faa48be9cd78a..48d56da62f08e94a6b4b26bf594aa8ae022a36bc 100644
@@ -1737,10 +1737,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
        struct mac_device_info *mac;
 
        /* Identify the MAC HW device */
-       if (priv->plat->has_gmac)
+       if (priv->plat->has_gmac) {
+               priv->dev->priv_flags |= IFF_UNICAST_FLT;
                mac = dwmac1000_setup(priv->ioaddr);
-       else
+       } else {
                mac = dwmac100_setup(priv->ioaddr);
+       }
        if (!mac)
                return -ENOMEM;
 
index 0856e1b7a849ad5d6e32cf7fe91e4b5bee861e30..f08c85acf761d3893b54f3d2f70e2fddcb6c5904 100644
@@ -162,7 +162,8 @@ static int ip101a_g_config_init(struct phy_device *phydev)
        /* Enable Auto Power Saving mode */
        c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
        c |= IP101A_G_APS_ON;
-       return c;
+
+       return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
 }
 
 static int ip175c_read_status(struct phy_device *phydev)
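
The one-liner completes a read-modify-write that previously stopped halfway: the APS bit was set in a local copy and the register value was then returned instead of written back, so the PHY never saw the change (and a positive register value was returned where 0/-errno is expected). The full pattern, sketched; the error check on the read is an addition for illustration, not part of the hunk:

    int val = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);     /* read */
    if (val < 0)
            return val;             /* phy_read() yields -errno on failure */
    val |= IP101A_G_APS_ON;         /* modify */
    return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, val); /* write back */
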
index 159da2905fe979dbdf03a4c065b72ed75ea9592c..33f8c51968b6da62cff64d968445fcf0d55249d9 100644
@@ -235,7 +235,7 @@ struct ppp_net {
 /* Prototypes. */
 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                        struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static int ppp_xmit_process(struct ppp *ppp);
 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 static void ppp_push(struct ppp *ppp);
 static void ppp_channel_push(struct channel *pch);
@@ -968,9 +968,9 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
        proto = npindex_to_proto[npi];
        put_unaligned_be16(proto, pp);
 
-       netif_stop_queue(dev);
        skb_queue_tail(&ppp->file.xq, skb);
-       ppp_xmit_process(ppp);
+       if (!ppp_xmit_process(ppp))
+               netif_stop_queue(dev);
        return NETDEV_TX_OK;
 
  outf:
@@ -1048,10 +1048,11 @@ static void ppp_setup(struct net_device *dev)
  * Called to do any work queued up on the transmit side
  * that can now be done.
  */
-static void
+static int
 ppp_xmit_process(struct ppp *ppp)
 {
        struct sk_buff *skb;
+       int ret = 0;
 
        ppp_xmit_lock(ppp);
        if (!ppp->closing) {
@@ -1061,10 +1062,13 @@ ppp_xmit_process(struct ppp *ppp)
                        ppp_send_frame(ppp, skb);
                /* If there's no work left to do, tell the core net
                   code that we can accept some more. */
-               if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+               if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
                        netif_wake_queue(ppp->dev);
+                       ret = 1;
+               }
        }
        ppp_xmit_unlock(ppp);
+       return ret;
 }
 
 static inline struct sk_buff *
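
ppp_xmit_process() now tells its caller whether everything queued was pushed downstream, so ppp_start_xmit() can stop the device queue only when work is genuinely backlogged rather than stopping it unconditionally and re-waking it on success. The resulting contract, in sketch form:

    /* ppp_xmit_process() after this change:
     *   1 - file.xq drained and no xmit_pending; queue has been woken
     *   0 - frames still pending; the caller may netif_stop_queue()
     */
    skb_queue_tail(&ppp->file.xq, skb);
    if (!ppp_xmit_process(ppp))
            netif_stop_queue(dev);  /* stop only when actually full */
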
index 5f883de7ef3ad0bd8dfe9392356725660f9a69b3..f8476841eb04e08edc2ad069e97cddc3da242e7f 100644
@@ -30,6 +30,7 @@
 #include <linux/uio.h>
 #include <linux/security.h>
 #include <linux/gfp.h>
+#include <linux/socket.h>
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -690,7 +691,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
        if (!likely(file->f_op && file->f_op->sendpage))
                return -EINVAL;
 
-       more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+       more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+       if (sd->len < sd->total_len)
+               more |= MSG_SENDPAGE_NOTLAST;
        return file->f_op->sendpage(file, buf->page, buf->offset,
                                    sd->len, &pos, more);
 }
index e1d9e0ede3095a080c0a40cefa70799a0654e756..f5647b59a90e6ade8f551096c6e7bc19193156ae 100644
@@ -896,8 +896,7 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
- * hold the RTNL, except that for @get_drvinfo the caller may or may
- * not hold the RTNL.
+ * hold the RTNL lock.
  *
  * See the structures used by these operations for further documentation.
  *
index 1f77540bdc95495ab1bc13edf9f6ac0332a649ae..5cbaa20f16596858f08916103f99377a658b3d3d 100644
@@ -2604,8 +2604,6 @@ extern void               net_disable_timestamp(void);
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
-extern int dev_seq_open_ops(struct inode *inode, struct file *file,
-                           const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);
index c0405ac928701a7d234986576baa69624ec1b71c..e3a9978f259f55425202664754e13c5504ed579e 100644
@@ -58,8 +58,8 @@ struct xt_set_info_target_v1 {
 struct xt_set_info_target_v2 {
        struct xt_set_info add_set;
        struct xt_set_info del_set;
-       u32 flags;
-       u32 timeout;
+       __u32 flags;
+       __u32 timeout;
 };
 
 #endif /*_XT_SET_H*/
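
xt_set.h is exported to userspace, where only the double-underscore types from <linux/types.h> exist; the kernel-internal u32 spelling fails to compile there. A minimal illustration, with a hypothetical struct name:

    #include <linux/types.h>

    struct example_uapi {   /* hypothetical userspace-visible structure */
            __u32 flags;    /* __u32 builds in kernel and userspace alike */
            __u32 timeout;  /* plain u32 exists only inside the kernel */
    };
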
index da2d3e2543f31cbd1464d40609dac996379c1449..b84bbd48b874b22d40d22c3cc8d57cc58346b86f 100644
@@ -265,7 +265,7 @@ struct ucred {
 #define MSG_NOSIGNAL   0x4000  /* Do not generate SIGPIPE */
 #define MSG_MORE       0x8000  /* Sender will send more */
 #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
-
+#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
 #define MSG_EOF         MSG_FIN
 
 #define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exit for file
index 7e1544e8f70d36599205fd821f1cdc34705d1676..9d9756cca0132bbc603c313b0ecb18670fef3bb0 100644
@@ -47,7 +47,7 @@ static void sb_close(struct sbuff *m)
        if (likely(m != &emergency))
                kfree(m);
        else {
-               xchg(&emergency_ptr, m);
+               emergency_ptr = m;
                local_bh_enable();
        }
 }
index 6c7dc9d78e10e1de7f9b706077cd13fc060cd811..c25d453b2803be9a5ad1261fee7cd7120aa60fd5 100644
@@ -4028,54 +4028,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
 
 #ifdef CONFIG_PROC_FS
 
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
-
-struct dev_iter_state {
-       struct seq_net_private p;
-       unsigned int pos; /* bucket << BUCKET_SPACE + offset */
-};
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 {
-       struct dev_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
        struct net_device *dev;
        struct hlist_node *p;
        struct hlist_head *h;
-       unsigned int count, bucket, offset;
+       unsigned int count = 0, offset = get_offset(*pos);
 
-       bucket = get_bucket(state->pos);
-       offset = get_offset(state->pos);
-       h = &net->dev_name_head[bucket];
-       count = 0;
+       h = &net->dev_name_head[get_bucket(*pos)];
        hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
-               if (count++ == offset) {
-                       state->pos = set_bucket_offset(bucket, count);
+               if (++count == offset)
                        return dev;
-               }
        }
 
        return NULL;
 }
 
-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-       struct dev_iter_state *state = seq->private;
        struct net_device *dev;
        unsigned int bucket;
 
-       bucket = get_bucket(state->pos);
        do {
-               dev = dev_from_same_bucket(seq);
+               dev = dev_from_same_bucket(seq, pos);
                if (dev)
                        return dev;
 
-               bucket++;
-               state->pos = set_bucket_offset(bucket, 0);
+               bucket = get_bucket(*pos) + 1;
+               *pos = set_bucket_offset(bucket, 1);
        } while (bucket < NETDEV_HASHENTRIES);
 
        return NULL;
@@ -4088,33 +4075,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
 {
-       struct dev_iter_state *state = seq->private;
-
        rcu_read_lock();
        if (!*pos)
                return SEQ_START_TOKEN;
 
-       /* check for end of the hash */
-       if (state->pos == 0 && *pos > 1)
+       if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
                return NULL;
 
-       return dev_from_new_bucket(seq);
+       return dev_from_bucket(seq, pos);
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       struct net_device *dev;
-
        ++*pos;
-
-       if (v == SEQ_START_TOKEN)
-               return dev_from_new_bucket(seq);
-
-       dev = dev_from_same_bucket(seq);
-       if (dev)
-               return dev;
-
-       return dev_from_new_bucket(seq);
+       return dev_from_bucket(seq, pos);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4213,13 +4187,7 @@ static const struct seq_operations dev_seq_ops = {
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
        return seq_open_net(inode, file, &dev_seq_ops,
-                           sizeof(struct dev_iter_state));
-}
-
-int dev_seq_open_ops(struct inode *inode, struct file *file,
-                    const struct seq_operations *ops)
-{
-       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_seq_fops = {
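
With dev_iter_state gone, the iterator keeps no private position at all: bucket and offset are re-derived from *pos on every call, so the iterator and the seq_file core can never disagree about where the walk stands. The loff_t encodes a hash bucket plus a 1-based offset within it (1-based so that pos == 0 stays reserved for SEQ_START_TOKEN). How the macros from the hunk compose, with an illustrative value:

    /* pos layout: [ bucket | offset ]
     *   get_bucket(pos)         = pos >> BUCKET_SPACE
     *   get_offset(pos)         = pos & ((1 << BUCKET_SPACE) - 1)
     *   set_bucket_offset(b, o) = b << BUCKET_SPACE | o
     * e.g. the 3rd device in bucket 5 sits at set_bucket_offset(5, 3),
     * and dev_seq_next() needs only ++*pos to step to the 4th. */
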
index 29c07fef922847aaabf4cd24c91f91d477c4edc0..626698f0db8b4624ee2f18c5e1856227fe9c67ed 100644
@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-       return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
+       return seq_open_net(inode, file, &dev_mc_seq_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_mc_seq_fops = {
index cf4989ac503bcc8163f90ee0e3e9610155845551..6f755cca45206934444464da8b8bcb0289921717 100644
 #include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 
-/* No hurry in this branch */
-static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
+/* No hurry in this branch
+ *
+ * Exported for the BPF JIT load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
 {
        u8 *ptr = NULL;
 
@@ -59,7 +62,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
-       return __load_pointer(skb, k, size);
+       return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
 /**
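
load_pointer() splits on the sign of k: non-negative offsets go through skb_header_pointer(), negative ones take the slow path, which this hunk renames and exports so the BPF JITs can call the very same helper instead of duplicating it. Negative k is how classic BPF addresses its special ranges; the constants below are from <linux/filter.h>:

    /* SKF_LL_OFF  (-0x200000): loads relative to the link-layer header
     * SKF_NET_OFF (-0x100000): loads relative to the network header
     * e.g. k = SKF_NET_OFF + 9 fetches the IPv4 protocol byte via
     * bpf_internal_load_pointer_neg_helper(), however the skb is laid out. */
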
index f223cdc75da6af27a688430ee98c0743608ea822..baf8d281152cebc9e55ce8a7343d427b24b8b61e 100644
@@ -3161,6 +3161,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+       int len = skb->len;
+
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf)
                return -ENOMEM;
@@ -3175,7 +3177,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
        skb_queue_tail(&sk->sk_error_queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_data_ready(sk, skb->len);
+               sk->sk_data_ready(sk, len);
        return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
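
The snapshot of skb->len is the entire fix: once skb_queue_tail() has placed the buffer on sk_error_queue, a reader on another CPU may dequeue and free it, so touching skb afterwards is a use-after-free. Distilled:

    int len = skb->len;             /* read while we still own the skb */
    skb_queue_tail(&sk->sk_error_queue, skb);  /* ownership ends here */
    sk->sk_data_ready(sk, len);     /* must not dereference skb again */

The netlink hunk further down applies the same rule by capturing len inside __netlink_sendskb() before queueing.
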
index cfd7edda0a8eb6e8dd908a1bc5c309225c4ce495..5d54ed30e821fc1744432178b33d0585831cd32f 100644
@@ -860,7 +860,7 @@ wait_for_memory:
        }
 
 out:
-       if (copied)
+       if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
                tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;
 
index 16c33e308121da59c61629a13fa239dada189874..b2869cab2092ae2d08e6b090c98fbe6aece35e75 100644
@@ -2044,7 +2044,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                if (!delta)
                        pmc->mca_sfcount[sfmode]--;
                for (j=0; j<i; j++)
-                       (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+                       ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
                struct ip6_sf_list *psf;
 
index cbdb754dbb10d9a88ab4eef9e1ddf4ff8b3362a6..3cc4487ac349997850940c6cbd274c196863f6aa 100644
@@ -735,6 +735,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
+       atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
        return ERR_PTR(-ENOMEM);
 #endif
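
__nf_conntrack_alloc() raises net->ct.count before allocating, so the CONFIG_NF_CONNTRACK_ZONES failure path must drop it again when it frees the half-built entry; without the added atomic_dec() every failure leaked one count, creeping the table toward its limit. The pairing, sketched; init_fails() stands in for the zone-extension failure:

    atomic_inc(&net->ct.count);             /* accounted up front */
    ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
    if (ct == NULL || init_fails(ct)) {
            if (ct)
                    kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
            atomic_dec(&net->ct.count);     /* undo the accounting */
            return ERR_PTR(-ENOMEM);
    }
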
index 0c8e43810ce363190c2c49477a3490ee6a077191..59530e93fa58f7abdaa4a0734ad2fd1e5241c040 100644
@@ -150,6 +150,17 @@ err1:
        return ret;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
+{
+       typeof(nf_ct_timeout_put_hook) timeout_put;
+
+       timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+       if (timeout_put)
+               timeout_put(timeout);
+}
+#endif
+
 static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 {
        struct xt_ct_target_info_v1 *info = par->targinfo;
@@ -158,7 +169,9 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
        struct nf_conn *ct;
        int ret = 0;
        u8 proto;
-
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+       struct ctnl_timeout *timeout;
+#endif
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
 
@@ -216,7 +229,6 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        if (info->timeout) {
                typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
-               struct ctnl_timeout *timeout;
                struct nf_conn_timeout *timeout_ext;
 
                rcu_read_lock();
@@ -245,7 +257,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
                                pr_info("Timeout policy `%s' can only be "
                                        "used by L3 protocol number %d\n",
                                        info->timeout, timeout->l3num);
-                               goto err4;
+                               goto err5;
                        }
                        /* Make sure the timeout policy matches any existing
                         * protocol tracker, otherwise default to generic.
@@ -258,13 +270,13 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
                                        "used by L4 protocol number %d\n",
                                        info->timeout,
                                        timeout->l4proto->l4proto);
-                               goto err4;
+                               goto err5;
                        }
                        timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
-                                                           GFP_KERNEL);
+                                                           GFP_ATOMIC);
                        if (timeout_ext == NULL) {
                                ret = -ENOMEM;
-                               goto err4;
+                               goto err5;
                        }
                } else {
                        ret = -ENOENT;
@@ -281,8 +293,12 @@ out:
        info->ct = ct;
        return 0;
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+err5:
+       __xt_ct_tg_timeout_put(timeout);
 err4:
        rcu_read_unlock();
+#endif
 err3:
        nf_conntrack_free(ct);
 err2:
index 32bb75324e76d0141c99df87449f3013b5c5a060..faa48f70b7c9b132bfaf3c8ce561982e82c4c7b6 100644
@@ -829,12 +829,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
        return 0;
 }
 
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
        int len = skb->len;
 
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
+       return len;
+}
+
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+{
+       int len = __netlink_sendskb(sk, skb);
+
        sock_put(sk);
        return len;
 }
@@ -957,8 +964,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
-               skb_queue_tail(&sk->sk_receive_queue, skb);
-               sk->sk_data_ready(sk, skb->len);
+               __netlink_sendskb(sk, skb);
                return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
        }
        return -1;
@@ -1698,10 +1704,8 @@ static int netlink_dump(struct sock *sk)
 
                if (sk_filter(sk, skb))
                        kfree_skb(skb);
-               else {
-                       skb_queue_tail(&sk->sk_receive_queue, skb);
-                       sk->sk_data_ready(sk, skb->len);
-               }
+               else
+                       __netlink_sendskb(sk, skb);
                return 0;
        }
 
@@ -1715,10 +1719,8 @@ static int netlink_dump(struct sock *sk)
 
        if (sk_filter(sk, skb))
                kfree_skb(skb);
-       else {
-               skb_queue_tail(&sk->sk_receive_queue, skb);
-               sk->sk_data_ready(sk, skb->len);
-       }
+       else
+               __netlink_sendskb(sk, skb);
 
        if (cb->done)
                cb->done(cb);
index 9f60008740e32875fb80d64b070cdf7262366650..9726fe684ab8a35ded1f1acb9ae4ea0dbdbe6b12 100644
@@ -1130,6 +1130,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
        int flags = msg->msg_flags;
        int err, done;
 
+       if (len > USHRT_MAX)
+               return -EMSGSIZE;
+
        if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
                                MSG_CMSG_COMPAT)) ||
                        !(msg->msg_flags & MSG_EOR))
index 06b42b7f5a0237c054403c3b695aea26dcae036b..92ba71dfe080125b58bc01cd40e7a2d158c9ab61 100644
@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
                                  int __user *optlen)
 {
-       if (len < sizeof(struct sctp_event_subscribe))
+       if (len <= 0)
                return -EINVAL;
-       len = sizeof(struct sctp_event_subscribe);
+       if (len > sizeof(struct sctp_event_subscribe))
+               len = sizeof(struct sctp_event_subscribe);
        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
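
Rejecting any optlen smaller than the current struct sctp_event_subscribe breaks binaries built against an older, shorter version of the struct whenever it grows; accepting any positive length and clamping the copy to the kernel's size is the backwards-compatible way to extend a sockopt. The cases:

    /* old binary, smaller struct: len = 10 -> 10 bytes copied back
     * current binary: len = sizeof(struct sctp_event_subscribe) -> full copy
     * oversized buffer: len = 4096 -> clamped to the struct size
     * only len <= 0 still fails with -EINVAL */
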
index 484cc6953fc61ebcb049604e5dbdb57a4ffcd385..851edcd6b0982d5a820da29e2145be75bddcff9c 100644
@@ -811,9 +811,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
 
        sock = file->private_data;
 
-       flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
-       if (more)
-               flags |= MSG_MORE;
+       flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+       /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
+       flags |= more;
 
        return kernel_sendpage(sock, page, offset, size, flags);
 }