#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
/* selectively copy writable fields, clearing any reserved area */
-static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_general_caps *from)
+static void copy_rw_fields(void *to, struct mlx5_caps *from)
{
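+ /* The writable flag bits are not broken out as named fields in this
+  * mlx5_ifc layout; they are written below as one big-endian word at
+  * the reserved_22 offset. */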
+ __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
u64 v64;
- to->log_max_qp = from->log_max_qp & 0x1f;
- to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
- to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
- to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
- to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
- to->pkey_table_size = cpu_to_be16(to_fw_pkey_sz(from->pkey_table_size));
- v64 = from->flags & MLX5_CAP_BITS_RW_MASK;
- to->flags = cpu_to_be64(v64);
+ MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
+ MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
+ MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
+ MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
+ MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
+ MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
+ v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
+ *flags_off = cpu_to_be64(v64);
}
static u16 get_pkey_table_size(int pkey)
{
return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
}
-static void fw2drv_caps(struct mlx5_caps *caps,
- struct mlx5_cmd_query_hca_cap_mbox_out *out)
+static void fw2drv_caps(struct mlx5_caps *caps, void *out)
{
struct mlx5_general_caps *gen = &caps->gen;
- u16 t16;
-
- gen->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
- gen->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
- gen->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
- gen->log_max_strq = out->hca_cap.log_max_strq_sz;
- gen->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
- gen->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
- gen->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
- gen->max_eqes = out->hca_cap.log_max_eq_sz;
- gen->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
- gen->log_max_eq = out->hca_cap.log_max_eq & 0xf;
- gen->max_indirection = out->hca_cap.max_indirection;
- gen->log_max_mrw_sz = out->hca_cap.log_max_mrw_sz;
- gen->log_max_bsf_list_size = 0;
- gen->log_max_klm_list_size = 0;
- gen->log_max_ra_req_dc = out->hca_cap.log_max_ra_req_dc;
- gen->log_max_ra_res_dc = out->hca_cap.log_max_ra_res_dc;
- gen->log_max_ra_req_qp = out->hca_cap.log_max_ra_req_qp;
- gen->log_max_ra_res_qp = out->hca_cap.log_max_ra_res_qp;
- gen->max_qp_counters = be16_to_cpu(out->hca_cap.max_qp_count);
- gen->pkey_table_size = get_pkey_table_size(be16_to_cpu(out->hca_cap.pkey_table_size));
- gen->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
- gen->num_ports = out->hca_cap.num_ports & 0xf;
- gen->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
- gen->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
- gen->flags = be64_to_cpu(out->hca_cap.flags);
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = out->hca_cap.uar_sz;
- gen->min_log_pg_sz = out->hca_cap.log_pg_sz;
- t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
- if (t16 & 0x8000) {
- gen->bf_reg_size = 1 << (t16 & 0x1f);
- gen->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
- } else {
- gen->bf_reg_size = 0;
- gen->bf_regs_per_page = 0;
- }
- gen->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
- gen->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
- gen->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
- gen->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
- gen->log_max_xrcd = out->hca_cap.log_max_xrcd;
- gen->log_uar_page_sz = be16_to_cpu(out->hca_cap.log_uar_page_sz);
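+ /* MLX5_GET_PR() reads each capability straight from the firmware-defined
+  * layout (and pr_debug()s the value), replacing the hand-written mailbox
+  * struct and the explicit masking above. */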
+ gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
+ gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
+ gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
+ gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
+ gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
+ gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
+ gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
+ gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
+ gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
+ gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
+ gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
+ gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
+ gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
+ gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
+ gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
+ gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
+ gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
+ gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
+ gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
+ gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
+ gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
+ gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
+ gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
+ gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
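+ /* legacy flag bits are not named in the layout; read them as one
+  * big-endian word at the reserved_22 offset (see copy_rw_fields()) */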
+ gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
+ pr_debug("flags = 0x%llx\n", gen->flags);
+ gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
+ gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
+ /* bf_reg_size is only meaningful when the BlueFlame capability bit is set */
+ gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf) ?
+ 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size) : 0;
+ gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
+ gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
+ gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
+ gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
+ gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
+ gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
+ gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
}
static const char *caps_opmod_str(u16 opmod)
int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
u16 opmod)
{
- struct mlx5_cmd_query_hca_cap_mbox_out *out;
- struct mlx5_cmd_query_hca_cap_mbox_in in;
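+ /* command buffer sizes come from the mlx5_ifc struct layouts:
+  * MLX5_ST_SZ_BYTES() is the size of the named structure in bytes */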
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *out;
int err;
- memset(&in, 0, sizeof(in));
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
- in.hdr.opmod = cpu_to_be16(opmod);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-
- err = mlx5_cmd_status_to_err(&out->hdr);
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto query_ex;
+ err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
goto query_ex;
}
mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, out);
+ fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
query_ex:
kfree(out);
return err;
}
-static int set_caps(struct mlx5_core_dev *dev,
- struct mlx5_cmd_set_hca_cap_mbox_in *in)
+static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
- struct mlx5_cmd_set_hca_cap_mbox_out out;
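+ /* the SET_HCA_CAP reply is small and fixed-size, so keep it on the stack;
+  * MLX5_ST_SZ_DW() sizes it in 32-bit words */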
+ u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
int err;
- memset(&out, 0, sizeof(out));
+ memset(out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
if (err)
return err;
- err = mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_status_to_err_v2(out);
return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
+ void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
struct mlx5_caps *cur_caps = NULL;
struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
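+ /* set_hca_cap_in embeds the whole capability structure, so the
+  * command buffer is allocated from the heap */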
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
/* disable checksum */
cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
- copy_rw_fields(&set_ctx->hca_cap, &cur_caps->gen);
- err = set_caps(dev, set_ctx);
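+ /* write the tunable fields into the capability struct embedded in
+  * the SET_HCA_CAP input, then issue the command */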
+ copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
+ cur_caps);
+ err = set_caps(dev, set_ctx, set_sz);
query_ex:
kfree(cur_caps);