VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, &data0, &data1);
+ if (status != VXGE_HW_OK)
+ goto exit;
data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
* See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
*/
struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en;
- u8 hash_type_ipv4_en;
- u8 hash_type_tcpipv6_en;
- u8 hash_type_ipv6_en;
- u8 hash_type_tcpipv6ex_en;
- u8 hash_type_ipv6ex_en;
+ u8 hash_type_tcpipv4_en:1,
+ hash_type_ipv4_en:1,
+ hash_type_tcpipv6_en:1,
+ hash_type_ipv6_en:1,
+ hash_type_tcpipv6ex_en:1,
+ hash_type_ipv6ex_en:1;
};
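
Packing these enables into 1-bit fields collapses six bytes of booleans into a single byte and makes it explicit that only 0/1 are meaningful values, which is also why the configuration defaults later in this patch switch from the VXGE_HW_RING_HASH_TYPE_* enum constants to plain 1/0. A minimal stand-alone sketch of the packing effect (struct and field names here are illustrative, not the driver's; exact sizes are implementation-defined, but GCC and Clang pack 1-bit char fields this way):

#include <stdio.h>

/* Six byte-wide flags: one byte per enable. */
struct hash_types_bytes {
	unsigned char tcpipv4_en;
	unsigned char ipv4_en;
	unsigned char tcpipv6_en;
	unsigned char ipv6_en;
	unsigned char tcpipv6ex_en;
	unsigned char ipv6ex_en;
};

/* The same flags as 1-bit fields: packed into one byte. */
struct hash_types_bits {
	unsigned char tcpipv4_en:1,
		      ipv4_en:1,
		      tcpipv6_en:1,
		      ipv6_en:1,
		      tcpipv6ex_en:1,
		      ipv6ex_en:1;
};

int main(void)
{
	struct hash_types_bits h = { .tcpipv4_en = 1 };

	/* Typically prints "6 1 1": the bit-field version fits in one byte. */
	printf("%zu %zu %u\n", sizeof(struct hash_types_bytes),
	       sizeof(struct hash_types_bits), h.tcpipv4_en);
	return 0;
}
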
void vxge_hw_device_debug_set(
}
}
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+	struct vxgedev *vdev = netdev_priv(dev);
+ enum vxge_hw_status status;
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+ return 0;
+
+ if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+ return -EINVAL;
+
+ vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+	/* Toggling RTH requires some of the logic in vxge_device_register
+	 * and a vpath reset (done below); that is why the netif_running()
+	 * check above only allows this change while the interface is down.
+	 */
+ status = vxge_reset_all_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+		return -EIO;
+ }
+
+ if (vdev->devh->config.rth_en)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ return 0;
+}
+
static const struct ethtool_ops vxge_ethtool_ops = {
.get_settings = vxge_ethtool_gset,
.set_settings = vxge_ethtool_sset,
.phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
+ .set_flags = vxge_set_flags,
};
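
With .set_flags wired up, the new vxge_set_flags() is reachable from user space through the legacy ETHTOOL_GFLAGS/ETHTOOL_SFLAGS ioctls (roughly what `ethtool -K ethX rxhash on|off` issues on kernels of this era). Below is a minimal, hypothetical user-space sketch that sets ETH_FLAG_RXHASH on the interface named in argv[1]; it illustrates the ioctl path only and is not part of the driver:

/*
 * Usage: ./rxhash <ifname> <on|off>
 * Names and error handling are illustrative; vxge_set_flags() above
 * rejects the request while the interface is up or when RTH steering
 * is disabled.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_value eval;
	struct ifreq ifr;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <ifname> <on|off>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eval;

	/* Report the current flag word (ETH_FLAG_RXHASH is the only bit
	 * vxge acts on; any other bit makes set_flags return -EOPNOTSUPP).
	 */
	eval.cmd = ETHTOOL_GFLAGS;
	eval.data = 0;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rxhash currently %s\n",
		       (eval.data & ETH_FLAG_RXHASH) ? "on" : "off");

	/* Request the new state: just the RXHASH bit, or no bits at all. */
	eval.cmd = ETHTOOL_SFLAGS;
	eval.data = strcmp(argv[2], "on") ? 0 : ETH_FLAG_RXHASH;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SFLAGS");

	close(fd);
	return 0;
}
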
void vxge_initialize_ethtool_ops(struct net_device *ndev)
else
skb_checksum_none_assert(skb);
+		/* rth_hash_type and rth_it_hit are non-zero regardless of
+		 * whether RSS is enabled. Only rth_value reflects the RSS
+		 * state (zero when disabled, non-zero when enabled), so key
+		 * off of that.
+		 */
+ if (ext_info.rth_value)
+ skb->rxhash = ext_info.rth_value;
+
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
mtable[index] = index % vdev->no_of_vpath;
}
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
/* set indirection table, bucket-to-vpath mapping */
status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
vdev->no_of_vpath,
return status;
}
+ /* Fill RTH hash types */
+ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+ hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+ hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+ hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+ hash_types.hash_type_tcpipv6ex_en =
+ vdev->config.rth_hash_type_tcpipv6ex;
+ hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
/*
- * Because the itable_set() method uses the active_table field
- * for the target virtual path the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
+ * Because the itable_set() method uses the active_table field
+ * for the target virtual path the RTH config should be updated
+ * for all VPATHs. The h/w only uses the lowest numbered VPATH
+ * when steering frames.
+ */
for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
goto out2;
}
}
+ printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+ hldev->config.rth_en ? "enabled" : "disabled");
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
vxge_initialize_ethtool_ops(ndev);
+ if (vdev->config.rth_steering != NO_STEERING) {
+ ndev->features |= NETIF_F_RXHASH;
+ hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+ }
+
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
- ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv4 = 1;
+ ll_config->rth_hash_type_ipv4 = 0;
+ ll_config->rth_hash_type_tcpipv6 = 0;
+ ll_config->rth_hash_type_ipv6 = 0;
+ ll_config->rth_hash_type_tcpipv6ex = 0;
+ ll_config->rth_hash_type_ipv6ex = 0;
ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
int addr_learn_en;
- int rth_steering;
- int rth_algorithm;
- int rth_hash_type_tcpipv4;
- int rth_hash_type_ipv4;
- int rth_hash_type_tcpipv6;
- int rth_hash_type_ipv6;
- int rth_hash_type_tcpipv6ex;
- int rth_hash_type_ipv6ex;
- int rth_bkt_sz;
+ u32 rth_steering:2,
+ rth_algorithm:2,
+ rth_hash_type_tcpipv4:1,
+ rth_hash_type_ipv4:1,
+ rth_hash_type_tcpipv6:1,
+ rth_hash_type_ipv6:1,
+ rth_hash_type_tcpipv6ex:1,
+ rth_hash_type_ipv6ex:1,
+ rth_bkt_sz:8;
int rth_jhash_golden_ratio;
int tx_steering_type;
int fifo_indicate_max_pkts;
VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
};
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
- VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
- VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
- VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
- VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
-};
-
enum vxge_hw_status vxge_hw_ring_rxd_reserve(
struct __vxge_hw_ring *ring_handle,
void **rxdh);