Merge branches '32bit_lid' and 'irq_affinity' into k.o/merge-test
author Doug Ledford <dledford@redhat.com>
Thu, 10 Aug 2017 18:31:29 +0000 (14:31 -0400)
committer Doug Ledford <dledford@redhat.com>
Thu, 10 Aug 2017 18:31:29 +0000 (14:31 -0400)
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Both add new code
include/rdma/ib_verbs.h - Both add new code

Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
include/linux/mlx5/driver.h
include/rdma/ib_verbs.h

Simple merge
Simple merge
index 9279631d8da0276cfcf0b1f9f9c30a73ced832c2,a7f2e60085c46c2300e3695029fbe1373cc0d480,a6d3bcfbf2297e4170bfb7e11e65f2e508c4967c..483110ec3b80578ebf715c4945999f3087171615
@@@@ -2111,26 -2036,15 -2036,15 +2111,26 @@@@ static bool flow_is_multicast_only(stru
            ib_attr->num_of_specs < 1)
                return false;
   
 --     eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
 --     if (eth_spec->type != IB_FLOW_SPEC_ETH ||
 --         eth_spec->size != sizeof(*eth_spec))
 ++     flow_spec = (union ib_flow_spec *)(ib_attr + 1);
 ++     if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
 ++             struct ib_flow_spec_ipv4 *ipv4_spec;
 ++
 ++             ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
 ++             if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
 ++                     return true;
 ++
                return false;
  -     return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
  -            is_multicast_ether_addr(eth_spec->val.dst_mac);
 ++     }
 + 
 -      return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
 -             is_multicast_ether_addr(eth_spec->val.dst_mac);
 ++     if (flow_spec->type == IB_FLOW_SPEC_ETH) {
 ++             struct ib_flow_spec_eth *eth_spec;
 ++
 ++             eth_spec = (struct ib_flow_spec_eth *)flow_spec;
 ++             return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
 ++                    is_multicast_ether_addr(eth_spec->val.dst_mac);
 ++     }
  +
 ++     return false;
   }
   
   static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
@@@@ -3696,126 -3556,6 -3556,14 +3696,134 @@@@ mlx5_ib_alloc_rdma_netdev(struct ib_dev
        return netdev;
   }
   
 ++static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
 ++{
 ++     if (!dev->delay_drop.dbg)
 ++             return;
 ++     debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
 ++     kfree(dev->delay_drop.dbg);
 ++     dev->delay_drop.dbg = NULL;
 ++}
 ++
 ++static void cancel_delay_drop(struct mlx5_ib_dev *dev)
 ++{
 ++     if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
 ++             return;
 ++
 ++     cancel_work_sync(&dev->delay_drop.delay_drop_work);
 ++     delay_drop_debugfs_cleanup(dev);
 ++}
 ++
 ++static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
 ++                                    size_t count, loff_t *pos)
 ++{
 ++     struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
 ++     char lbuf[20];
 ++     int len;
 ++
 ++     len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
 ++     return simple_read_from_buffer(buf, count, pos, lbuf, len);
 ++}
 ++
 ++static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
 ++                                     size_t count, loff_t *pos)
 ++{
 ++     struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
 ++     u32 timeout;
 ++     u32 var;
 ++
 ++     if (kstrtouint_from_user(buf, count, 0, &var))
 ++             return -EFAULT;
 ++
 ++     timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
 ++                     1000);
 ++     if (timeout != var)
 ++             mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
 ++                         timeout);
 ++
 ++     delay_drop->timeout = timeout;
 ++
 ++     return count;
 ++}
 ++
 ++static const struct file_operations fops_delay_drop_timeout = {
 ++     .owner  = THIS_MODULE,
 ++     .open   = simple_open,
 ++     .write  = delay_drop_timeout_write,
 ++     .read   = delay_drop_timeout_read,
 ++};
 ++
 ++static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
 ++{
 ++     struct mlx5_ib_dbg_delay_drop *dbg;
 ++
 ++     if (!mlx5_debugfs_root)
 ++             return 0;
 ++
 ++     dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
 ++     if (!dbg)
 ++             return -ENOMEM;
 ++
 ++     dbg->dir_debugfs =
 ++             debugfs_create_dir("delay_drop",
 ++                                dev->mdev->priv.dbg_root);
 ++     if (!dbg->dir_debugfs)
 ++             return -ENOMEM;
 ++
 ++     dbg->events_cnt_debugfs =
 ++             debugfs_create_atomic_t("num_timeout_events", 0400,
 ++                                     dbg->dir_debugfs,
 ++                                     &dev->delay_drop.events_cnt);
 ++     if (!dbg->events_cnt_debugfs)
 ++             goto out_debugfs;
 ++
 ++     dbg->rqs_cnt_debugfs =
 ++             debugfs_create_atomic_t("num_rqs", 0400,
 ++                                     dbg->dir_debugfs,
 ++                                     &dev->delay_drop.rqs_cnt);
 ++     if (!dbg->rqs_cnt_debugfs)
 ++             goto out_debugfs;
 ++
 ++     dbg->timeout_debugfs =
 ++             debugfs_create_file("timeout", 0600,
 ++                                 dbg->dir_debugfs,
 ++                                 &dev->delay_drop,
 ++                                 &fops_delay_drop_timeout);
 ++     if (!dbg->timeout_debugfs)
 ++             goto out_debugfs;
 ++
 ++     return 0;
 ++
 ++out_debugfs:
 ++     delay_drop_debugfs_cleanup(dev);
 ++     return -ENOMEM;
 ++}
 ++
 ++static void init_delay_drop(struct mlx5_ib_dev *dev)
 ++{
 ++     if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
 ++             return;
 ++
 ++     mutex_init(&dev->delay_drop.lock);
 ++     dev->delay_drop.dev = dev;
 ++     dev->delay_drop.activate = false;
 ++     dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
 ++     INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
 ++     atomic_set(&dev->delay_drop.rqs_cnt, 0);
 ++     atomic_set(&dev->delay_drop.events_cnt, 0);
 ++
 ++     if (delay_drop_debugfs_init(dev))
 ++             mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 ++}
 ++
++ const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
++              int comp_vector)
++ {
++      struct mlx5_ib_dev *dev = to_mdev(ibdev);
++ 
++      return mlx5_get_vector_affinity(dev->mdev, comp_vector);
++ }
++ 
   static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
   {
        struct mlx5_ib_dev *dev;
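
The irq_affinity branch adds mlx5_ib_get_vector_affinity() above; the
registration of that callback is not visible in this excerpt. A minimal
sketch of the expected wiring, assuming the v4.13-era struct ib_device
with a plain get_vector_affinity function pointer:

	/* Sketch only -- the real assignment lives in mlx5_ib_add(),
	 * which this excerpt truncates. */
	dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
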
index 3cec683fd70fe39e44596de002315ecae1b401ee,c065132b956d6ba772f812bff21a190d5759bf13,e464e8179655e0a6c91e4831bb21ded285563bea..d8e761cd5448e63f167fa3bec04dcceabf4f65ac
@@@@ -1156,24 -1143,18 -1081,12 +1094,18 @@@@ static int mlx5_load_one(struct mlx5_co
                goto err_stop_eqs;
        }
   
--      err = mlx5_irq_set_affinity_hints(dev);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-               goto err_affinity_hints;
-       }
-  
 +      err = mlx5_init_fs(dev);
        if (err) {
 -              dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
 -              goto err_affinity_hints;
 +              dev_err(&pdev->dev, "Failed to init flow steering\n");
 +              goto err_fs;
        }
   
 -      err = mlx5_init_fs(dev);
 ++     err = mlx5_core_set_hca_defaults(dev);
  +     if (err) {
 -              dev_err(&pdev->dev, "Failed to init flow steering\n");
 ++             dev_err(&pdev->dev, "Failed to set hca defaults\n");
  +             goto err_fs;
  +     }
  +
   #ifdef CONFIG_MLX5_CORE_EN
        mlx5_eswitch_attach(dev->priv.eswitch);
   #endif
Simple merge
index 1082b4c81b2ce607e11c4b69b5d7de1124edc0af,7eaf7d2ab4241b3d629e111652b9000638f2b4eb,73ed2e4e802fcbb75d879a9ec0ca8076e1945818..5ca3ac1e9113cb747fa0c1208b10614188b42e9e
@@@@ -3717,4 -3706,16 -3708,26 +3719,38 @@@@ static inline enum rdma_ah_attr_type rd
        else
                return RDMA_AH_ATTR_TYPE_IB;
   }
+  
+ +/* Return slid in 16bit CPU encoding */
+ +static inline u16 ib_slid_cpu16(u32 slid)
+ +{
+ +     return (u16)slid;
+ +}
+ +
+ +/* Return slid in 16bit BE encoding */
+ +static inline u16 ib_slid_be16(u32 slid)
+ +{
+ +     return cpu_to_be16((u16)slid);
+ +}
+++
++ /**
++  * ib_get_vector_affinity - Get the affinity mappings of a given completion
++  *   vector
++  * @device:         the rdma device
++  * @comp_vector:    index of completion vector
++  *
++  * Returns NULL on failure, otherwise a corresponding cpu map of the
++  * completion vector (returns all-cpus map if the device driver doesn't
++  * implement get_vector_affinity).
++  */
++ static inline const struct cpumask *
++ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
++ {
++      if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
++          !device->get_vector_affinity)
++              return NULL;
++ 
++      return device->get_vector_affinity(device, comp_vector);
++ 
++ }
++ 
   #endif /* IB_VERBS_H */
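
The 32bit_lid branch widens LIDs to 32 bits; the two ib_slid_*16()
helpers above recover the legacy 16-bit encodings. A hedged usage
sketch (the slid variable is hypothetical, standing in for a 32-bit
LID obtained from the new API):

	/* Hypothetical example, not part of this merge. */
	u16 host_slid = ib_slid_cpu16(slid);	/* low 16 bits, CPU order */
	u16 wire_slid = ib_slid_be16(slid);	/* low 16 bits, big endian */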
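
ib_get_vector_affinity() above lets ULPs place queues near the CPUs
that service a given completion vector. A minimal consumer sketch under
the documented contract (pick_comp_vector is hypothetical, not from
this merge):

	/* Hypothetical ULP helper: find a completion vector whose IRQ
	 * affinity covers @cpu, falling back to vector 0 when the driver
	 * does not implement get_vector_affinity (NULL return). */
	static int pick_comp_vector(struct ib_device *device, int cpu)
	{
		const struct cpumask *mask;
		int vec;

		for (vec = 0; vec < device->num_comp_vectors; vec++) {
			mask = ib_get_vector_affinity(device, vec);
			if (mask && cpumask_test_cpu(cpu, mask))
				return vec;
		}
		return 0;
	}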