char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
+ cpumask_var_t affinity_mask;
};
/* Store only the lower range. Higher range is given by fw. */
return enic->rq_count + enic->wq_count + 1;
}
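+/* Return true if @intr is the dedicated error interrupt for the current
+ * interrupt mode (INTx or MSI-X); MSI has no separate error vector.
+ */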
+static inline bool enic_is_err_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_err_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_err_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
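+/* Return true if @intr is the notify interrupt for the current
+ * interrupt mode (INTx or MSI-X); MSI has no separate notify vector.
+ */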
+static inline bool enic_is_notify_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_notify_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_notify_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
+#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
{3, 6}, /* 10 - 40 Gbps */
};
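+/* Allocate an affinity mask for each RQ/WQ interrupt and spread the
+ * vectors across CPUs local to the adapter's NUMA node. Error and
+ * notify interrupts, and vectors that already carry a mask, are skipped.
+ */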
+static void enic_init_affinity_hint(struct enic *enic)
+{
+ int numa_node = dev_to_node(&enic->pdev->dev);
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
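+		/* Skip err/notify vectors and vectors that already have a
+		 * non-empty affinity mask.
+		 */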
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
+ (enic->msix[i].affinity_mask &&
+ !cpumask_empty(enic->msix[i].affinity_mask)))
+ continue;
+ if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
+ GFP_KERNEL))
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ enic->msix[i].affinity_mask);
+ }
+}
+
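+/* Free the cpumasks allocated by enic_init_affinity_hint() */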
+static void enic_free_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
+ continue;
+ free_cpumask_var(enic->msix[i].affinity_mask);
+ }
+}
+
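+/* Pass the affinity hints to the kernel for each MSI-X vector and point
+ * the XPS map of each WQ at the CPUs serving that queue's interrupt.
+ */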
+static void enic_set_affinity_hint(struct enic *enic)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) ||
+ enic_is_notify_intr(enic, i) ||
+ !enic->msix[i].affinity_mask ||
+ cpumask_empty(enic->msix[i].affinity_mask))
+ continue;
+ err = irq_set_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
+ if (err)
+ netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ err);
+ }
+
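+	/* Align XPS for each WQ with the CPUs handling its interrupt */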
+ for (i = 0; i < enic->wq_count; i++) {
+ int wq_intr = enic_msix_wq_intr(enic, i);
+
+ if (enic->msix[wq_intr].affinity_mask &&
+ !cpumask_empty(enic->msix[wq_intr].affinity_mask))
+ netif_set_xps_queue(enic->netdev,
+ enic->msix[wq_intr].affinity_mask,
+ i);
+ }
+}
+
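+/* Clear the affinity hints before the IRQs are freed */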
+static void enic_unset_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++)
+ irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+}
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
netdev_err(netdev, "Unable to request irq.\n");
return err;
}
+ enic_init_affinity_hint(enic);
+ enic_set_affinity_hint(enic);
err = enic_dev_notify_set(enic);
if (err) {
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
return err;
}
enic_dev_notify_unset(enic);
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
+ enic_free_affinity_hint(enic);
}
static void enic_kdump_kernel_config(struct enic *enic)
return 0;
err_out_free_vnic_resources:
+ enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);