/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

/* FDB table levels referenced below; the enum was lost in extraction and is
 * restored here since FDB_FAST_PATH/FDB_SLOW_PATH are used by the table
 * creation code.
 */
enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};

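/* Build an offloaded flow rule in the fast path FDB table: match on the
 * source vport of the ingress representor, forward to the egress vport
 * and/or a flow counter. The vlan push/pop action bits are masked out
 * because they are emulated in software (see the vlan action helpers
 * below) rather than handed to firmware.
 */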
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        void *misc;
        int i = 0;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        /* per flow vlan pop/push is emulated, don't set that into the firmware */
        flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                                           MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport_num = attr->out_rep->vport;
                i++;
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                      MLX5_MATCH_MISC_PARAMETERS;
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (attr->encap)
                flow_act.encap_id = attr->encap->encap_id;

        rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
                                   spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                mlx5_fc_destroy(esw->dev, counter);

        return rule;
}

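/* Apply (or clear) the global vlan pop policy on all enabled VF vports;
 * called when the first vlan push/pop rule is installed and again when
 * the last one is removed.
 */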
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep;

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep; /* neither push nor pop, either side works */

        return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep;

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules without it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}

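/* vlan push/pop is emulated in software: the per-vport vlan state is
 * refcounted so concurrent rules must agree on a single vlan to push,
 * and the global pop policy is applied once, when the first push/pop
 * rule appears.
 */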
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan;
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}

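/* Add a slow path rule matching traffic transmitted on one of the
 * representor's SQs (source sqn + source vport 0) and re-inject it to
 * the peer vport, so packets sent through a representor netdev reach
 * their VF.
 */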
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}

void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5_esw_sq *esw_sq, *tmp;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
                mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
                list_del(&esw_sq->list);
                kfree(esw_sq);
        }
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u16 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_esw_sq *esw_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        for (i = 0; i < sqns_num; i++) {
                esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
                if (!esw_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(esw_sq);
                        goto out_err;
                }
                esw_sq->send_to_vport_rule = flow_rule;
                list_add(&esw_sq->list, &rep->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5_eswitch_sqs2vport_stop(esw, rep);
        return err;
}

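/* The FDB miss rule has empty match criteria, so it catches every
 * packet that hit no offloaded rule, and forwards it to vport 0 where
 * the slow path / representors take over.
 */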
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                err = -ENOMEM;
                goto out;
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = 0;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule = flow_rule;
out:
        kvfree(spec);
        return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS  4

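/* The offloads FDB is split in two: an auto-grouped fast path table
 * holding the offloaded rules, and a slow path table sized for
 * nvports + MAX_PF_SQ + 1 entries, carrying the send-to-vport rules
 * plus the single miss rule.
 */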
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        void *match_criteria;
        int table_size, ix, err = 0;
        u32 flags = 0;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
                flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  ESW_OFFLOADS_NUM_ENTRIES,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  flags);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
                goto fast_fdb_err;
        }
        esw->fdb_table.fdb = fdb;

        table_size = nvports + MAX_PF_SQ + 1;
        fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.fdb = fdb;

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Table\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_core_dev *dev = esw->dev;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in); /* allocated with mlx5_vzalloc(), so kvfree(), not kfree() */
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

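/* Steer traffic that arrived from a given vport to the supplied TIR,
 * i.e. into the RX queues of that vport's representor netdev.
 */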
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

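/* Switch the eswitch from legacy SRIOV to offloads (switchdev) mode by
 * tearing SRIOV down and bringing it back up in the new mode; on
 * failure, fall back to legacy so the eswitch is never left disabled.
 */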
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
                        esw_warn(esw->dev, "Inline mode is different between vports\n");
                }
        }
        return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        /* disable PF RoCE so missed packets don't go through RoCE steering */
        mlx5_dev_list_lock();
        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();

        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
                goto create_fdb_err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;

                err = rep->load(esw, rep);
                if (err)
                        goto err_reps;
        }
        return 0;

err_reps:
        for (vport--; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
        /* enable back PF RoCE */
        mlx5_dev_list_lock();
        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();

        return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
        }

        /* enable back PF RoCE */
        mlx5_dev_list_lock();
        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }

        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_table(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
                *mlx5_mode = MLX5_INLINE_MODE_NONE;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
                *mlx5_mode = MLX5_INLINE_MODE_L2;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
                *mlx5_mode = MLX5_INLINE_MODE_IP;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
        switch (mlx5_mode) {
        case MLX5_INLINE_MODE_NONE:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
                break;
        case MLX5_INLINE_MODE_L2:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
                break;
        case MLX5_INLINE_MODE_IP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
                break;
        case MLX5_INLINE_MODE_TCP_UDP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

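/* devlink entry point for switching eswitch modes, driven from
 * userspace with the iproute2 devlink tool, e.g.:
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch set pci/0000:06:00.0 mode legacy
 *
 * (the pci/... handle above is only an illustrative example)
 */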
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct mlx5_core_dev *dev;
        u16 cur_mlx5_mode, mlx5_mode = 0;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (cur_mlx5_mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch);
        else
                return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int num_vports = esw->enabled_vports;
        int err;
        int vport;
        u8 mlx5_mode;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;

        for (vport = 1; vport < num_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
                                 vport);
                        goto revert_inline_mode;
                }
        }

        esw->offloads.inline_mode = mlx5_mode;
        return 0;

revert_inline_mode:
        while (--vport > 0)
                mlx5_modify_nic_vport_min_inline(dev,
                                                 vport,
                                                 esw->offloads.inline_mode);
out:
        return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

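/* Query the min inline mode of all VF vports; succeeds only when they
 * all agree, since a single mode is reported for the whole eswitch.
 */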
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
        struct mlx5_core_dev *dev = esw->dev;
        int vport;
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

        *mode = mlx5_mode;
        return 0;
}

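/* Representor registration: the caller (e.g. the mlx5e representor
 * code) hands over load/unload callbacks and identity for a vport; the
 * rep is marked valid so the offloads init/cleanup loops pick it up.
 */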
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep *__rep)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        memset(rep, 0, sizeof(*rep));

        rep->load   = __rep->load;
        rep->unload = __rep->unload;
        rep->vport  = __rep->vport;
        rep->netdev = __rep->netdev;
        ether_addr_copy(rep->hw_id, __rep->hw_id);

        INIT_LIST_HEAD(&rep->vport_sqs_list);
        rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->unload(esw, rep);

        rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
        return rep->netdev;
}