 	pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	if (bp->regview)
-		iounmap(bp->regview);
-
-	/* for vf doorbells are part of the regview and were unmapped along with
-	 * it. FW is only loaded by PF.
-	 */
-	if (IS_PF(bp)) {
-		if (bp->doorbells)
-			iounmap(bp->doorbells);
-
-		bnx2x_release_firmware(bp);
-	}
-	bnx2x_free_mem_bp(bp);
-
-	if (remove_netdev)
+	bnx2x_disable_pcie_error_reporting(bp);
+
+	if (remove_netdev) {
+		if (bp->regview)
+			iounmap(bp->regview);
+
+		/* For vfs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by PF.
+		 */
+		if (IS_PF(bp)) {
+			if (bp->doorbells)
+				iounmap(bp->doorbells);
+
+			bnx2x_release_firmware(bp);
+		}
+		bnx2x_free_mem_bp(bp);
+
 		free_netdev(dev);
 
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
+		if (atomic_read(&pdev->enable_cnt) == 1)
+			pci_release_regions(pdev);
+	}
 
 	pci_disable_device(pdev);
 }
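
Rebuilt as straight C for readability, the teardown path after this hunk looks as follows. This is a sketch only: the signature is assumed from the identifiers used in the hunk (pdev, dev, bp, remove_netdev) and everything before the PCIe call is elided. The effect of the change is that an error-recovery unload (remove_netdev == false) keeps the netdev, BAR mappings, and firmware around for later re-initialization, and only disables the PCI device.

/* Sketch: signature assumed, body abridged to the lines this hunk touches. */
static void __bnx2x_remove(struct pci_dev *pdev, struct net_device *dev,
			   struct bnx2x *bp, bool remove_netdev)
{
	/* ... shutdown and power-state handling elided ... */

	bnx2x_disable_pcie_error_reporting(bp);

	if (remove_netdev) {		/* full removal: tear everything down */
		if (bp->regview)
			iounmap(bp->regview);

		/* VF doorbells live inside the regview; only the PF loads FW */
		if (IS_PF(bp)) {
			if (bp->doorbells)
				iounmap(bp->doorbells);
			bnx2x_release_firmware(bp);
		}
		bnx2x_free_mem_bp(bp);
		free_netdev(dev);

		if (atomic_read(&pdev->enable_cnt) == 1)
			pci_release_regions(pdev);
	}
	/* remove_netdev == false: error-recovery path, keep state alive */
	pci_disable_device(pdev);
}
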
 int sysctl_tcp_nometrics_save __read_mostly;
 
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
+						   const struct inetpeer_addr *daddr,
+						   struct net *net, unsigned int hash);
+
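(The forward declaration is what lets tcpm_new(), defined earlier in the file than __tcp_get_metrics(), re-run the lookup under the metrics lock; see the tcpm_new() hunk below.)
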
 struct tcp_fastopen_metrics {
 	u16	mss;
 	u16	syn_loss:10;	/* Recurring Fast Open SYN losses */
 	}
 }
+#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+		tcpm_suck_dst(tm, dst, false);
+}
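
A note on the staleness check: TCP_METRICS_TIMEOUT is one hour in jiffies, and time_after() compares via signed subtraction, so the test stays correct across jiffies wraparound. Expanded, the test is equivalent to the following; tcpm_expired() is a hypothetical name used only for illustration:

/* Hypothetical helper: the time_after() test above, written out.
 * The signed comparison is what makes it jiffies-wraparound-safe.
 */
static inline bool tcpm_expired(const struct tcp_metrics_block *tm)
{
	return (long)((tm->tcpm_stamp + TCP_METRICS_TIMEOUT) - jiffies) < 0;
}

Entries that trip this test are refreshed from the route via tcpm_suck_dst() rather than discarded.
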
+
+#define TCP_METRICS_RECLAIM_DEPTH	5
+#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
+
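TCP_METRICS_RECLAIM_PTR is a sentinel (a bogus non-NULL pointer, 0x1) that the lookup returns when it walks more than TCP_METRICS_RECLAIM_DEPTH chained entries without a match, telling the caller to recycle the oldest entry instead of growing the bucket. A sketch of the lookup that produces it; the chain layout and helper names (tcp_metrics_hash[].chain, tcpm_next, tcpm_saddr, tcpm_daddr, addr_same()) are assumed from the rest of tcp_metrics.c and are not part of this excerpt:

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	/* Walk the bucket; both addresses must match */
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr))
			return tm;
		depth++;
	}
	/* No match: report a full chain via the sentinel so the caller
	 * reclaims the oldest entry instead of allocating another one.
	 */
	return depth > TCP_METRICS_RECLAIM_DEPTH ? TCP_METRICS_RECLAIM_PTR : NULL;
}
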
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
-					  struct inetpeer_addr *addr,
-					  unsigned int hash,
-					  bool reclaim)
+					  struct inetpeer_addr *saddr,
+					  struct inetpeer_addr *daddr,
+					  unsigned int hash)
 {
 	struct tcp_metrics_block *tm;
 	struct net *net;
+	bool reclaim = false;
 
 	spin_lock_bh(&tcp_metrics_lock);
 	net = dev_net(dst->dev);
+
+	/* While waiting for the spin-lock the cache might have been populated
+	 * with this entry and so we have to check again.
+	 */
+	tm = __tcp_get_metrics(saddr, daddr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR) {
+		reclaim = true;
+		tm = NULL;
+	}
+	if (tm) {
+		tcpm_check_stamp(tm, dst);
+		goto out_unlock;
+	}
+
 	if (unlikely(reclaim)) {
 		struct tcp_metrics_block *oldest;
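
The hunk ends just as the reclaim branch begins. For context, the branch continues roughly as follows; this is reconstructed from the surrounding upstream file, so treat the field names and the else-branch allocation as assumptions rather than part of this patch:

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		/* Reuse the least-recently-stamped entry in this bucket */
		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
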
 						 bool create)
 {
 	struct tcp_metrics_block *tm;
-	struct inetpeer_addr addr;
+	struct inetpeer_addr saddr, daddr;
 	unsigned int hash;
 	struct net *net;
-	bool reclaim;
 
-	addr.family = sk->sk_family;
-	switch (addr.family) {
+	saddr.family = sk->sk_family;
+	daddr.family = sk->sk_family;
+	switch (daddr.family) {
 	case AF_INET:
-		addr.addr.a4 = inet_sk(sk)->inet_daddr;
-		hash = (__force unsigned int) addr.addr.a4;
+		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
+		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
+		hash = (__force unsigned int) daddr.addr.a4;
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 	net = dev_net(dst->dev);
 	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
-	tm = __tcp_get_metrics(&addr, net, hash);
-	reclaim = false;
-	if (tm == TCP_METRICS_RECLAIM_PTR) {
-		reclaim = true;
+	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR)
 		tm = NULL;
-	}
 	if (!tm && create)
-		tm = tcpm_new(dst, &addr, hash, reclaim);
+		tm = tcpm_new(dst, &saddr, &daddr, hash);
 	else
 		tcpm_check_stamp(tm, dst);
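
Worth noting for this last hunk: the hash is still derived from the destination only (daddr.addr.a4 above), so adding the source address does not re-bucket existing destinations; saddr merely disambiguates entries within a bucket during the chain walk. The comparison helper looks roughly like this, assumed to match the addr_same() used by the lookup (it is not shown in this excerpt):

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;
	/* IPv6: compare the full 128-bit address */
	return ipv6_addr_equal((const struct in6_addr *)&a->addr.a6[0],
			       (const struct in6_addr *)&b->addr.a6[0]);
}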