return err;
}
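+/* CQ creation flags mlx4 implements; anything else is rejected at create time */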
+#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_cq *cq;
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx4_uar *uar;
int err;
- if (attr->flags)
+ if (entries < 1 || entries > dev->dev->caps.max_cqes)
return ERR_PTR(-EINVAL);
- if (entries < 1 || entries > dev->dev->caps.max_cqes)
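+ /* Reject any create flag this driver does not implement */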
+ if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
return ERR_PTR(-EINVAL);
cq = kmalloc(sizeof *cq, GFP_KERNEL);
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
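+ /* Remember the requested create flags so completion handling can check for time-stamping */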
+ cq->create_flags = attr->flags;
INIT_LIST_HEAD(&cq->send_qp_list);
INIT_LIST_HEAD(&cq->recv_qp_list);
vector = dev->eq_table[vector % ibdev->num_comp_vectors];
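+ /*
+  * The added last argument tells mlx4_core to enable CQE time-stamping
+  * when the user asked for IB_CQ_FLAGS_TIMESTAMP_COMPLETION.
+  */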
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- cq->db.dma, &cq->mcq, vector, 0, 0);
+ cq->db.dma, &cq->mcq, vector, 0,
+ !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
if (err)
goto err_dbmap;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
int have_ib_ports;
+ struct mlx4_uverbs_ex_query_device cmd;
+ struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+ struct mlx4_clock_params clock_params;
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
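+ /*
+  * Extended query_device: userspace may pass driver-specific input in
+  * uhw; validate it before building a variable-length response.
+  */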
+ if (uhw->inlen) {
+ if (uhw->inlen < sizeof(cmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EINVAL;
+
+ if (cmd.reserved)
+ return -EINVAL;
+ }
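+ /*
+  * response_length starts at the end of the mandatory fields; optional
+  * fields below grow it only when userspace left room for them.
+  */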
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
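+ /*
+  * Scale the firmware-reported core clock to the kHz units that
+  * ib_device_attr expects; the low 48 bits of the mask cover the
+  * width of the CQE timestamp.
+  */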
+ props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
+ props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+ err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+ if (err)
+ goto out;
+
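+ /*
+  * Report where the HCA's internal clock sits within its page only if
+  * the user's response buffer has room for the extra field.
+  */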
+ if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+ resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
+ resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ }
+
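+ /* Copy back only the part of the response that was actually filled in */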
+ if (uhw->outlen) {
+ err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+ if (err)
+ goto out;
+ }
out:
kfree(in_mad);
kfree(out_mad);
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
}
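+ /* Advertise the extended query_device and create_cq uverbs commands */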
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ);
+
mlx4_ib_alloc_eqs(dev, ibdev);
spin_lock_init(&iboe->lock);
struct mutex resize_mutex;
struct ib_umem *umem;
struct ib_umem *resize_umem;
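+ /* IB_CQ_FLAGS_* requested at CQ creation time */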
+ int create_flags;
/* List of QPs that it serves. */
struct list_head send_qp_list;
struct list_head recv_qp_list;
u8 port;
};
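+/*
+ * Driver-specific part of the extended query_device command and response,
+ * exchanged with userspace through the uverbs vendor channel (uhw).
+ */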
+struct mlx4_uverbs_ex_query_device {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+enum query_device_resp_mask {
+ QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u64 hca_core_clock_offset;
+};
+
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);