/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
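
/* Spanning tree state is programmed into the device through the SPMS
 * register: a single VID for a vPort, or every active VLAN on a bridged port.
 */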
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
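
/* Flooding is controlled per flooding-table index range through the SFTR
 * register: the unicast table and, unless only_uc is set, the
 * broadcast/multicast table as well.
 */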
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 false);
}

static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
							vid, set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
						 !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
					   mlxsw_sp_port->uc_flood);
	return err;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
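
/* FID (Filtering Identifier) management: FIDs are created and destroyed in
 * the device through SFMR, and VID-to-FID mappings are set up through SVFA.
 */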
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports member in the FID might be using a
	 * {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					true, false);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
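
/* VLAN membership and per-VID learning are programmed in batches: SPVM
 * records for membership and SPVMLR records for learning, each register
 * taking up to REC_MAX_COUNT VIDs per write.
 */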
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
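
/* FDB entries are added and removed through the SFD register, keyed either
 * by local port or by LAG ID.
 */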
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}
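
/* Multicast group (MDB) entries: a MID index is allocated per {MAC, VID},
 * the port is added to the MID's port list (SMID) and, for the first user,
 * the multicast record itself is written (SFD).
 */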
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj),
					       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}
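
/* Dump the hardware FDB through an SFD query session and report the entries
 * that belong to this port (or its LAG) via the switchdev dump callback.
 */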
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
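
/* Learned and aged-out FDB entries reported by the device are synced to the
 * bridge driver via switchdev FDB notifiers when learning_sync is enabled.
 */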
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
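
/* Delayed work that polls the device for FDB notification records (SFN),
 * processes them and then re-arms itself.
 */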
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}