#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
+#define QUERY_FUNC_CAP_BASE_TUNNEL_QPN_OFFSET 0x44
+#define QUERY_FUNC_CAP_BASE_PROXY_QPN_OFFSET 0x48
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ size = dev->caps.base_tunnel_sqpn + 8 * slave;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_BASE_TUNNEL_QPN_OFFSET);
+
+ size = dev->caps.sqp_start + 8 * slave;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_BASE_PROXY_QPN_OFFSET);
+
} else
err = -EINVAL;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_BASE_TUNNEL_QPN_OFFSET);
+ func_cap->base_tunnel_qpn = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_BASE_PROXY_QPN_OFFSET);
+ func_cap->base_proxy_qpn = size & 0xFFFFFF;
+
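For illustration only: the MLX4_PUT/MLX4_GET pairs above encode a fixed 8-QP-per-function stride from each region base. A minimal sketch of that arithmetic, using a hypothetical helper name that is not part of the patch:

static u32 slave_region_base(u32 region_base, int slave)
{
	/* each function (PF or VF) owns 8 consecutive QPs in the region */
	return region_base + 8 * slave;
}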
for (i = 1; i <= func_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
MLX4_CMD_QUERY_FUNC_CAP,
int max_eq;
int reserved_eq;
int mcg_quota;
+ u32 base_qpn;
+ u32 base_tunnel_qpn;
+ u32 base_proxy_qpn;
u8 physical_port[MLX4_MAX_PORTS + 1];
u8 port_flags[MLX4_MAX_PORTS + 1];
};
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+ dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
return 0;
}
/* Check whether there are live VFs; return how many there are */
return -ENODEV;
}
+ /* Set our sqp_start and tunnel QP base from the QUERY_FUNC_CAP reply */
+ dev->caps.sqp_start = func_cap.base_proxy_qpn;
+ dev->caps.base_tunnel_sqpn = func_cap.base_tunnel_qpn;
+
return 0;
}
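To make the guest's view concrete, here is a hedged sketch of the two 8-QP ranges it can derive from the QUERY_FUNC_CAP reply; the struct and helper are hypothetical illustrations, not part of the patch:

struct guest_sqp_ranges {
	u32 proxy_first, proxy_last;	/* proxy SQPs, inclusive */
	u32 tunnel_first, tunnel_last;	/* tunnel QPs, inclusive */
};

static struct guest_sqp_ranges get_guest_sqp_ranges(struct mlx4_func_cap *fc)
{
	struct guest_sqp_ranges r = {
		.proxy_first	= fc->base_proxy_qpn,
		.proxy_last	= fc->base_proxy_qpn + 7,	/* 8 proxy SQPs */
		.tunnel_first	= fc->base_tunnel_qpn,
		.tunnel_last	= fc->base_tunnel_qpn + 7,	/* 8 tunnel QPs */
	};
	return r;
}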
* We also reserve the MSB of the 24-bit QP number to indicate
* that a QP is an XRC QP.
*/
- dev->caps.sqp_start =
+ dev->caps.base_sqpn =
ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
{
}
+ /* Reserve 8 real SQPs in both native and SRIOV modes.
+ * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
+ * (for all PFs and VFs), and 8 corresponding tunnel QPs.
+ * Each proxy SQP works opposite its own tunnel QP.
+ *
+ * The QPs are arranged as follows:
+ * a. 8 real SQPs
+ * b. All the proxy SQPs (8 per function)
+ * c. All the tunnel QPs (8 per function)
+ */
+
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, dev->caps.sqp_start + 8,
+ (1 << 23) - 1, dev->caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
reserved_from_top);
+
+ /* In mfunc, sqp_start is the base of the proxy SQPs, since the PF also
+  * uses paravirtualized SQPs.
+  * In native mode, sqp_start is the base of the real SQPs.
+  */
+ if (mlx4_is_mfunc(dev)) {
+ dev->caps.sqp_start = dev->caps.base_sqpn +
+ 8 * (mlx4_master_func_num(dev) + 1);
+ dev->caps.base_tunnel_sqpn = dev->caps.sqp_start + 8 * MLX4_MFUNC_MAX;
+ } else {
+ dev->caps.sqp_start = dev->caps.base_sqpn;
+ }
+
if (err)
return err;
- return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+ return mlx4_CONF_SPECIAL_QP(dev, dev->caps.base_sqpn);
}
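The reservation count handed to mlx4_bitmap_init() above can be read as a small formula; a sketch, assuming mlx4_is_master() and MLX4_MFUNC_MAX exactly as used in this patch (the helper itself is illustrative, not part of the patch):

static u32 num_reserved_sqps(struct mlx4_dev *dev)
{
	/* 8 real SQPs always; on the master, add 8 proxy SQPs plus
	 * 8 tunnel QPs for each of the MLX4_MFUNC_MAX functions.
	 */
	return 8 + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
}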
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
- return mlx4_is_qp_reserved(dev, qpn);
+ return mlx4_is_qp_reserved(dev, qpn) &&
+ (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
- return (qpn < dev->caps.sqp_start + 8);
+ return (qpn < dev->caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
+}
+
+static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
+{
+ int base = dev->caps.sqp_start + slave * 8;
+
+ if (qpn >= base && qpn < base + 8)
+ return 1;
+
+ return 0;
}
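A usage note on the new predicate, with illustrative numbers only: since sqp_start points at the proxy region in mfunc mode, slave N's window is [sqp_start + 8*N, sqp_start + 8*N + 7]. For example:

	/* slave 2 owns [sqp_start + 16, sqp_start + 23] */
	mlx4_is_guest_proxy(dev, 2, dev->caps.sqp_start + 17);	/* 1 */
	mlx4_is_guest_proxy(dev, 2, dev->caps.sqp_start + 24);	/* 0 */

Combined with valid_reserved() above, the master may claim any reserved QP, while a guest may only claim QPs inside its own proxy window.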
static inline int mlx4_is_mfunc(struct mlx4_dev *dev)