struct res_fs_rule {
struct res_common com;
int qpn;
+ /* VF DMFS mbox with port flipped */
+ void *mirr_mbox;
+ /* mirr_mbox_size > 0 --> original rule; mirror it when entering HA mode */
+ /* mirr_mbox_size == 0 --> mirror copy; un-apply it when leaving HA mode */
+ u32 mirr_mbox_size;
+ struct list_head mirr_list;
+ u64 mirr_rule_id;
};
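
For orientation, a minimal userspace sketch of the invariant these three
fields encode (hypothetical fs_rule_sketch type, not the driver's struct):
an original rule keeps a port-flipped copy of its attach mailbox and, once
mirrored, the mirror's rule ID, while the mirror entry itself carries no
mailbox. The detach path below uses exactly that to tell the two apart.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* hypothetical stand-in for the tracker entry, illustration only */
    struct fs_rule_sketch {
        void *mirr_mbox;         /* port-flipped attach mailbox; NULL on a mirror */
        uint32_t mirr_mbox_size; /* 0 on a mirror */
        uint64_t mirr_rule_id;   /* mirror's rule ID on the original, else 0 */
    };

    static int is_mirror(const struct fs_rule_sketch *r)
    {
        return r->mirr_mbox == NULL;
    }

    int main(void)
    {
        static char mbox_copy[64];
        struct fs_rule_sketch orig = { mbox_copy, sizeof(mbox_copy), 0x42 };
        struct fs_rule_sketch mirr = { NULL, 0, 0 };

        assert(!is_mirror(&orig) && is_mirror(&mirr));
        return 0;
    }
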
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
return err;
}
+static u32 qp_attach_mbox_size(void *mbox)
+{
+ u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ struct _rule_hw *rule_header;
+
+ rule_header = (struct _rule_hw *)(mbox + size);
+
+ /* each header's size field counts 32-bit words; a zero size ends the chain */
+ while (rule_header->size) {
+ size += rule_header->size * sizeof(u32);
+ rule_header = (struct _rule_hw *)((u32 *)rule_header + rule_header->size);
+ }
+ return size;
+}
+
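
To make the mailbox walk above concrete, here is a standalone sketch with a
hypothetical 16-byte ctrl segment and made-up header sizes (the real sizes
come from the mlx4_net_trans_rule_hw_* structures): the buffer holds one
ctrl segment followed by packed rule headers whose first byte gives their
own length in 32-bit words, and a zero size byte terminates the chain.

    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_SIZE 16 /* stand-in for sizeof(struct mlx4_net_trans_rule_hw_ctrl) */

    static uint32_t attach_mbox_size(const uint8_t *mbox)
    {
        uint32_t size = CTRL_SIZE;
        const uint8_t *hdr = mbox + size;

        while (*hdr) { /* first byte of a header = its size in dwords */
            size += *hdr * sizeof(uint32_t);
            hdr = mbox + size; /* the next header starts right after */
        }
        return size;
    }

    int main(void)
    {
        uint8_t mbox[128] = { 0 };

        mbox[CTRL_SIZE] = 4;      /* e.g. an L2 header: 4 dwords = 16 bytes */
        mbox[CTRL_SIZE + 16] = 3; /* e.g. an L3 header: 3 dwords = 12 bytes */
        printf("%u\n", attach_mbox_size(mbox)); /* 16 + 16 + 12 = 44 */
        return 0;
    }
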
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
+
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
+ struct res_fs_rule *rrule;
+ u32 mbox_size;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
break;
case MLX4_NET_TRANS_RULE_ID_IB:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
default:
pr_err("Corrupted mailbox\n");
err = -EINVAL;
- goto err_put;
+ goto err_put_qp;
}
execute:
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- goto err_put;
+ goto err_put_qp;
+
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources\n");
- /* detach rule*/
+ goto err_detach;
+ }
+
+ err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
+ if (err)
+ goto err_detach;
+
+ mbox_size = qp_attach_mbox_size(inbox->buf);
+ rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
+ if (!rrule->mirr_mbox) {
+ err = -ENOMEM;
+ goto err_put_rule;
+ }
+ rrule->mirr_mbox_size = mbox_size;
+ rrule->mirr_rule_id = 0;
+ memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
+
+ /* set different port */
+ ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
+ if (ctrl->port == 1)
+ ctrl->port = 2;
+ else
+ ctrl->port = 1;
+
+ /* a mirroring failure is not fatal to the original attach */
+ if (mlx4_is_bonded(dev))
+ mlx4_do_mirror_rule(dev, rrule);
+
+ atomic_inc(&rqp->ref_count);
+
+err_put_rule:
+ put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
+err_detach:
+ /* detach rule on error */
+ if (err)
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
- goto err_put;
- }
- atomic_inc(&rqp->ref_count);
-err_put:
+err_put_qp:
put_res(dev, slave, qpn, RES_QP);
return err;
}
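
Two things worth noting in the wrapper above. First, the success path
deliberately falls through the err_put_rule and err_detach labels: the busy
reference is always dropped, and only the firmware detach is guarded by the
err check. Second, the mirror mailbox is just a byte-for-byte copy of the
attach mailbox with the port field swapped. A compilable sketch of that
copy-and-flip step (hypothetical ctrl_sketch layout; only the port byte
matters here):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctrl_sketch {  /* stand-in for mlx4_net_trans_rule_hw_ctrl */
        uint8_t rsvd[7];
        uint8_t port;     /* 1 or 2 */
    };

    /* duplicate an attach mailbox, flipping port 1 <-> 2 for the mirror */
    static void *clone_with_flipped_port(const void *mbox, uint32_t size)
    {
        struct ctrl_sketch *ctrl;
        void *copy = malloc(size);

        if (!copy)
            return NULL;
        memcpy(copy, mbox, size);
        ctrl = copy;
        ctrl->port = (ctrl->port == 1) ? 2 : 1;
        return copy;
    }

    int main(void)
    {
        struct ctrl_sketch orig = { .port = 1 };
        struct ctrl_sketch *mirr = clone_with_flipped_port(&orig, sizeof(orig));

        assert(mirr && mirr->port == 2);
        free(mirr);
        return 0;
    }
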
+static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+ int err;
+
+ err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
+ return err;
+ }
+
+ mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ return 0;
+}
+
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
int err;
struct res_qp *rqp;
struct res_fs_rule *rrule;
+ u64 mirr_reg_id;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
if (err)
return err;
+
+ if (!rrule->mirr_mbox) {
+ mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
+ put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+ return -EINVAL;
+ }
+ mirr_reg_id = rrule->mirr_rule_id;
+ kfree(rrule->mirr_mbox);
+
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
if (err)
return err;
+ if (mirr_reg_id && mlx4_is_bonded(dev)) {
+ err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
+ if (err) {
+ mlx4_err(dev, "Fail to get resource of mirror rule\n");
+ } else {
+ put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
+ mlx4_undo_mirror_rule(dev, rrule);
+ }
+ }
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
spin_unlock_irq(mlx4_tlock(dev));
}
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ struct res_fs_rule *mirr_rule;
+ u64 reg_id;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!fs_rule->mirr_mbox) {
+ mlx4_err(dev, "rule mirroring mailbox is null\n");
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+ err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ if (err)
+ goto err;
+
+ err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
+ if (err)
+ goto err_detach;
+
+ err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
+ if (err)
+ goto err_rem;
+
+ fs_rule->mirr_rule_id = reg_id;
+ mirr_rule->mirr_rule_id = 0;
+ mirr_rule->mirr_mbox_size = 0;
+ mirr_rule->mirr_mbox = NULL;
+ put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
+
+ return 0;
+err_rem:
+ rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
+err_detach:
+ mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+err:
+ return err;
+}
+
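
mlx4_do_mirror_rule above acquires in three stages (firmware attach,
tracker entry, busy reference on the new entry) and unwinds completed
stages in reverse order through its labels. A generic, runnable sketch of
that ladder with hypothetical stage names:

    #include <stdio.h>

    static int stage(const char *name, int fail)
    {
        printf("do %s\n", name);
        return fail ? -1 : 0;
    }

    static int mirror_like(int fail_at)
    {
        int err;

        err = stage("attach", fail_at == 1);
        if (err)
            goto err;
        err = stage("track", fail_at == 2);
        if (err)
            goto err_detach;
        err = stage("get_res", fail_at == 3);
        if (err)
            goto err_rem;
        return 0;

    err_rem:
        printf("undo track\n");
    err_detach:
        printf("undo attach\n");
    err:
        return err;
    }

    int main(void)
    {
        mirror_like(3); /* attach and track succeed, so both get undone */
        return 0;
    }
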
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
+ struct rb_node *p;
+ struct res_fs_rule *fs_rule;
+ int err = 0;
+ LIST_HEAD(mirr_list);
+
+ for (p = rb_first(root); p; p = rb_next(p)) {
+ fs_rule = rb_entry(p, struct res_fs_rule, com.node);
+ if ((bond && fs_rule->mirr_mbox_size) ||
+ (!bond && !fs_rule->mirr_mbox_size))
+ list_add_tail(&fs_rule->mirr_list, &mirr_list);
+ }
+
+ list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
+ if (bond)
+ err += mlx4_do_mirror_rule(dev, fs_rule);
+ else
+ err += mlx4_undo_mirror_rule(dev, fs_rule);
+ }
+ return err;
+}
+
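
mlx4_mirror_fs_rules walks the rb-tree in two passes on purpose: the first
pass only collects candidates onto the local mirr_list, and the second pass
runs do/undo, which insert or delete RES_FS_RULE tree nodes; mutating the
tree while iterating with rb_next() would not be safe. A minimal sketch of
the same collect-then-apply pattern over a singly linked list (hypothetical
rule type):

    #include <stdio.h>

    struct rule {
        int is_mirror;     /* stand-in for mirr_mbox_size == 0 */
        struct rule *next; /* the main container */
        struct rule *work; /* stand-in for the mirr_list linkage */
    };

    int main(void)
    {
        struct rule pool[3] = { { 0 }, { 1 }, { 0 } };
        struct rule *head = &pool[0], *worklist = NULL, **tail = &worklist;
        int bond = 0; /* leaving HA mode: undo existing mirrors */

        pool[0].next = &pool[1];
        pool[1].next = &pool[2];

        /* pass 1: pick candidates without touching the container */
        for (struct rule *r = head; r; r = r->next)
            if (bond ? !r->is_mirror : r->is_mirror) {
                *tail = r;
                tail = &r->work;
            }
        *tail = NULL;

        /* pass 2: now adding/removing container entries is safe */
        for (struct rule *r = worklist; r; r = r->work)
            printf("%s rule %ld\n", bond ? "mirror" : "unmirror",
                   (long)(r - pool));
        return 0;
    }
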
+int mlx4_bond_fs_rules(struct mlx4_dev *dev)
+{
+ return mlx4_mirror_fs_rules(dev, true);
+}
+
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
+{
+ return mlx4_mirror_fs_rules(dev, false);
+}
+
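
The two exported entry points are thin wrappers over the same worker, keyed
only by the bond flag. A runnable sketch of how an HA transition hook might
drive them, with stub functions standing in for the driver calls (the real
caller lives elsewhere in mlx4):

    #include <stdio.h>

    /* hypothetical stubs for the driver entry points, illustration only */
    static int mlx4_bond_fs_rules_stub(void)   { puts("mirror all rules");   return 0; }
    static int mlx4_unbond_fs_rules_stub(void) { puts("unmirror all rules"); return 0; }

    /* sketch of an HA transition hook */
    static int ha_transition(int enter_ha)
    {
        return enter_ha ? mlx4_bond_fs_rules_stub()
                        : mlx4_unbond_fs_rules_stub();
    }

    int main(void)
    {
        ha_transition(1); /* entering HA: mirror every VF DMFS rule */
        ha_transition(0); /* leaving HA: remove the mirrors */
        return 0;
    }
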
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);