ib_attr->num_of_specs < 1)
return false;
-	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
-	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
-	    eth_spec->size != sizeof(*eth_spec))
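+	/*
+	 * A flow counts as multicast-only when its first spec is an IPv4 spec
+	 * with a multicast destination IP, or an ETH spec whose destination
+	 * MAC value and mask are both multicast addresses.
+	 */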
+	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
+	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
+		struct ib_flow_spec_ipv4 *ipv4_spec;
+
+		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
+		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
+			return true;
+
return false;
+	}
 
-	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
-	       is_multicast_ether_addr(eth_spec->val.dst_mac);
+	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
+		struct ib_flow_spec_eth *eth_spec;
+
+		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
+		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+		       is_multicast_ether_addr(eth_spec->val.dst_mac);
+	}
+
+	return false;
}
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
return netdev;
}
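+/* Remove the delay_drop debugfs hierarchy and free its bookkeeping state */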
+static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (!dev->delay_drop.dbg)
+		return;
+
+	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
+	kfree(dev->delay_drop.dbg);
+	dev->delay_drop.dbg = NULL;
+}
+
+static void cancel_delay_drop(struct mlx5_ib_dev *dev)
+{
+	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+		return;
+
+	cancel_work_sync(&dev->delay_drop.delay_drop_work);
+	delay_drop_debugfs_cleanup(dev);
+}
+
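+/* debugfs "timeout" attribute: the delay drop timeout, in usec */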
+static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
+				       size_t count, loff_t *pos)
+{
+	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+	char lbuf[20];
+	int len;
+
+	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
+	return simple_read_from_buffer(buf, count, pos, lbuf, len);
+}
+
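+/*
+ * Written values are rounded up to 100 usec granularity and capped at the
+ * maximum supported timeout.
+ */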
+static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
+					size_t count, loff_t *pos)
+{
+	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+	u32 timeout;
+	u32 var;
+
+	if (kstrtouint_from_user(buf, count, 0, &var))
+		return -EFAULT;
+
+	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
+			1000);
+	if (timeout != var)
+		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
+			    timeout);
+
+	delay_drop->timeout = timeout;
+
+	return count;
+}
+
+static const struct file_operations fops_delay_drop_timeout = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.write	= delay_drop_timeout_write,
+	.read	= delay_drop_timeout_read,
+};
+
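+/*
+ * Expose the "delay_drop" debugfs directory with the "timeout" attribute and
+ * the "num_timeout_events" and "num_rqs" counters.
+ */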
+static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_ib_dbg_delay_drop *dbg;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	/* Store dbg before creating entries so cleanup can free it on error */
+	dev->delay_drop.dbg = dbg;
+
+	dbg->dir_debugfs =
+		debugfs_create_dir("delay_drop",
+				   dev->mdev->priv.dbg_root);
+	if (!dbg->dir_debugfs)
+		goto out_debugfs;
+
+	dbg->events_cnt_debugfs =
+		debugfs_create_atomic_t("num_timeout_events", 0400,
+					dbg->dir_debugfs,
+					&dev->delay_drop.events_cnt);
+	if (!dbg->events_cnt_debugfs)
+		goto out_debugfs;
+
+	dbg->rqs_cnt_debugfs =
+		debugfs_create_atomic_t("num_rqs", 0400,
+					dbg->dir_debugfs,
+					&dev->delay_drop.rqs_cnt);
+	if (!dbg->rqs_cnt_debugfs)
+		goto out_debugfs;
+
+	dbg->timeout_debugfs =
+		debugfs_create_file("timeout", 0600,
+				    dbg->dir_debugfs,
+				    &dev->delay_drop,
+				    &fops_delay_drop_timeout);
+	if (!dbg->timeout_debugfs)
+		goto out_debugfs;
+
+	return 0;
+
+out_debugfs:
+	delay_drop_debugfs_cleanup(dev);
+	return -ENOMEM;
+}
+
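+/*
+ * Set up delay drop defaults (timeout, work item, counters); only done when
+ * the device reports IB_RAW_PACKET_CAP_DELAY_DROP.
+ */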
+static void init_delay_drop(struct mlx5_ib_dev *dev)
+{
+	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+		return;
+
+	mutex_init(&dev->delay_drop.lock);
+	dev->delay_drop.dev = dev;
+	dev->delay_drop.activate = false;
+	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
+	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
+	atomic_set(&dev->delay_drop.rqs_cnt, 0);
+	atomic_set(&dev->delay_drop.events_cnt, 0);
+
+	if (delay_drop_debugfs_init(dev))
+		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+}
+
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+}
+
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
else
return RDMA_AH_ATTR_TYPE_IB;
}
+
+/* Return slid in 16bit CPU encoding */
+static inline u16 ib_slid_cpu16(u32 slid)
+{
+	return (u16)slid;
+}
+
+/* Return slid in 16bit BE encoding */
+static inline u16 ib_slid_be16(u32 slid)
+{
+	return cpu_to_be16((u16)slid);
+}
+/**
+ * ib_get_vector_affinity - Get the affinity mappings of a given completion
+ * vector
+ * @device: the rdma device
+ * @comp_vector: index of completion vector
+ *
+ * Returns NULL on failure, otherwise a corresponding cpu map of the
+ * completion vector (returns all-cpus map if the device driver doesn't
+ * implement get_vector_affinity).
+ */
+static inline const struct cpumask *
+ib_get_vector_affinity(struct ib_device *device, int comp_vector)
+{
+	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
+	    !device->get_vector_affinity)
+		return NULL;
+
+	return device->get_vector_affinity(device, comp_vector);
+}
+
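+/*
+ * Illustrative use only (not part of this patch): a consumer could test
+ * whether a completion vector is serviced on a given CPU, e.g.:
+ *
+ *	mask = ib_get_vector_affinity(device, comp_vector);
+ *	affine = mask && cpumask_test_cpu(cpu, mask);
+ */
+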
#endif /* IB_VERBS_H */