mlxsw: Enable configuration of flooding domains
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / net / ethernet / mellanox / mlxsw / switchx2.c
CommitLineData
31557f0f
JP
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/slab.h>
43#include <linux/device.h>
44#include <linux/skbuff.h>
45#include <linux/if_vlan.h>
46#include <net/switchdev.h>
47#include <generated/utsrelease.h>
48
49#include "core.h"
50#include "reg.h"
51#include "port.h"
52#include "trap.h"
53#include "txheader.h"
54
/* Driver identification strings reported via ethtool get_drvinfo. */
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;
59
31557f0f
JP
/* Per-ASIC driver state. */
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];	/* base MAC read from the SPAD register */
};
66
/* Per-CPU software counters for one port.
 * The u64 counters are protected by @syncp on 32-bit hosts;
 * @tx_dropped is a plain u32 updated without syncp protection
 * (see mlxsw_sx_port_get_stats64()).
 */
struct mlxsw_sx_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};
75
/* Per-port private state, embedded in the netdev private area. */
struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;	/* back-pointer to owning ASIC */
	u8 local_port;
};
82
/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159
/* Prepend and fill the mlxsw Tx header on @skb.
 * The caller must have reserved MLXSW_TXHDR_LEN bytes of headroom.
 * EMAD traffic gets a higher egress tclass and the dedicated EMAD RDQ.
 */
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
186
187static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
188 bool is_up)
189{
190 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
191 char paos_pl[MLXSW_REG_PAOS_LEN];
192
193 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
194 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
195 MLXSW_PORT_ADMIN_STATUS_DOWN);
196 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
197}
198
199static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
200 bool *p_is_up)
201{
202 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
203 char paos_pl[MLXSW_REG_PAOS_LEN];
204 u8 oper_status;
205 int err;
206
207 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
208 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
209 if (err)
210 return err;
211 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
212 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
213 return 0;
214}
215
216static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
217{
218 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
219 char pmtu_pl[MLXSW_REG_PMTU_LEN];
220 int max_mtu;
221 int err;
222
223 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
224 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
225 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
226 if (err)
227 return err;
228 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
229
230 if (mtu > max_mtu)
231 return -EINVAL;
232
233 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
234 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
235}
236
/* Assign the port to switch partition @swid via the PSPA register. */
static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}
245
e61011b5
IS
/* Map the local port to a system port via the SSPR register. */
static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}
255
31557f0f
JP
256static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
257 bool *p_usable)
258{
259 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
260 char pmlp_pl[MLXSW_REG_PMLP_LEN];
261 int err;
262
263 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
264 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
265 if (err)
266 return err;
267 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
268 return 0;
269}
270
271static int mlxsw_sx_port_open(struct net_device *dev)
272{
273 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
274 int err;
275
276 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
277 if (err)
278 return err;
279 netif_start_queue(dev);
280 return 0;
281}
282
283static int mlxsw_sx_port_stop(struct net_device *dev)
284{
285 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
286
287 netif_stop_queue(dev);
288 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
289}
290
/* ndo_start_xmit: transmit one skb through the mlxsw core.
 * A Tx header is prepended, so enough headroom must exist; if not,
 * the skb is reallocated (the original is freed). The skb is always
 * consumed except when NETDEV_TX_BUSY is returned.
 */
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Back-pressure: let the stack requeue if the core cannot accept. */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* Snapshot the length now; the skb is owned by the core after
	 * a successful transmit and must not be touched.
	 */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
336
337static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
338{
339 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
340 int err;
341
342 err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
343 if (err)
344 return err;
345 dev->mtu = mtu;
346 return 0;
347}
348
/* ndo_get_stats64: sum the per-CPU software counters into @stats.
 * The u64 counters are read under the u64_stats fetch/retry loop so a
 * concurrent writer on a 32-bit host cannot be observed mid-update.
 */
static struct rtnl_link_stats64 *
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
380
/* Netdev callbacks for a SwitchX-2 port. */
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
};
388
/* ethtool get_drvinfo: report driver name/version, firmware revision
 * (from the bus probe info) and the underlying bus device name.
 */
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
406
/* One ethtool hardware statistic: its name and the accessor that
 * extracts the counter from a PPCNT register payload.
 */
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
411
/* IEEE 802.3 counter set exposed through ethtool -S, in PPCNT order. */
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
492
493static void mlxsw_sx_port_get_strings(struct net_device *dev,
494 u32 stringset, u8 *data)
495{
496 u8 *p = data;
497 int i;
498
499 switch (stringset) {
500 case ETH_SS_STATS:
501 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
502 memcpy(p, mlxsw_sx_port_hw_stats[i].str,
503 ETH_GSTRING_LEN);
504 p += ETH_GSTRING_LEN;
505 }
506 break;
507 }
508}
509
/* ethtool get_ethtool_stats: read the PPCNT register once and extract
 * every counter via its table getter. If the query fails, all counters
 * are reported as zero rather than exposing stale payload data.
 */
static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
524
525static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
526{
527 switch (sset) {
528 case ETH_SS_STATS:
529 return MLXSW_SX_PORT_HW_STATS_LEN;
530 default:
531 return -EOPNOTSUPP;
532 }
533}
534
/* Mapping between a PTYS protocol mask and the corresponding ethtool
 * supported/advertised bits and speed in Mb/s.
 */
struct mlxsw_sx_port_link_mode {
	u32 mask;	/* MLXSW_REG_PTYS_ETH_SPEED_* bit(s) */
	u32 supported;	/* ethtool SUPPORTED_* bits (may be 0) */
	u32 advertised;	/* ethtool ADVERTISED_* bits (may be 0) */
	u32 speed;	/* link speed in Mb/s */
};
541
/* PTYS-to-ethtool link mode table. Entries without supported/advertised
 * bits have no ethtool representation and only contribute a speed.
 */
static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
640
641static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
642{
643 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
644 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
645 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
646 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
647 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
648 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
649 return SUPPORTED_FIBRE;
650
651 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
652 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
653 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
654 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
655 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
656 return SUPPORTED_Backplane;
657 return 0;
658}
659
660static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
661{
662 u32 modes = 0;
663 int i;
664
665 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
666 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
667 modes |= mlxsw_sx_port_link_mode[i].supported;
668 }
669 return modes;
670}
671
672static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
673{
674 u32 modes = 0;
675 int i;
676
677 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
678 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
679 modes |= mlxsw_sx_port_link_mode[i].advertised;
680 }
681 return modes;
682}
683
684static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
685 struct ethtool_cmd *cmd)
686{
687 u32 speed = SPEED_UNKNOWN;
688 u8 duplex = DUPLEX_UNKNOWN;
689 int i;
690
691 if (!carrier_ok)
692 goto out;
693
694 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
695 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
696 speed = mlxsw_sx_port_link_mode[i].speed;
697 duplex = DUPLEX_FULL;
698 break;
699 }
700 }
701out:
702 ethtool_cmd_speed_set(cmd, speed);
703 cmd->duplex = duplex;
704}
705
706static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
707{
708 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
709 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
710 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
711 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
712 return PORT_FIBRE;
713
714 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
715 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
716 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
717 return PORT_DA;
718
719 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
720 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
721 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
722 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
723 return PORT_NONE;
724
725 return PORT_OTHER;
726}
727
/* ethtool get_settings: query PTYS and translate capability, admin and
 * operational protocol masks into the legacy ethtool_cmd fields.
 */
static int mlxsw_sx_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational protocol (link down), fall back to the
	 * capability mask to derive the connector type.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
762
763static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
764{
765 u32 ptys_proto = 0;
766 int i;
767
768 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
769 if (advertising & mlxsw_sx_port_link_mode[i].advertised)
770 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
771 }
772 return ptys_proto;
773}
774
775static u32 mlxsw_sx_to_ptys_speed(u32 speed)
776{
777 u32 ptys_proto = 0;
778 int i;
779
780 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
781 if (speed == mlxsw_sx_port_link_mode[i].speed)
782 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
783 }
784 return ptys_proto;
785}
786
/* ethtool set_settings: compute the requested PTYS admin mask (from the
 * advertised modes with autoneg, otherwise from the forced speed), clamp
 * it to the port's capabilities and write it. If the link is currently
 * up, bounce the port (admin down then up) so the new mask takes effect.
 */
static int mlxsw_sx_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only protocols the hardware is capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;	/* nothing to change */

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;	/* link is down; no bounce needed */

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
851
/* ethtool callbacks for a SwitchX-2 port (legacy get/set_settings API). */
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sx_port_get_strings,
	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
	.get_sset_count		= mlxsw_sx_port_get_sset_count,
	.get_settings		= mlxsw_sx_port_get_settings,
	.set_settings		= mlxsw_sx_port_set_settings,
};
861
/* switchdev attr_get: report the ASIC's base MAC as the parent switch ID
 * so all ports of one device share the same physical port id.
 */
static int mlxsw_sx_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
879
/* switchdev callbacks for a SwitchX-2 port. */
static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
};
883
/* Read the device's base MAC address from the SPAD register into hw_id. */
static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
	return 0;
}
895
/* Derive the netdev MAC address: read the base MAC from the PPAD
 * register and offset the last byte by the local port number.
 */
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}
915
/* Set the spanning-tree state for @vid on the port via the SPMS register.
 * The payload is heap-allocated because MLXSW_REG_SPMS_LEN is too large
 * for a stack buffer.
 */
static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
932
/* Write the admin protocol/speed mask for the port via the PTYS register. */
static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u32 speed)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}
942
/* Configure the port's MAC learning mode via the SPMLR register. */
static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    enum mlxsw_reg_spmlr_learn_mode mode)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}
953
/* Create and register the netdev for local port @local_port.
 * Allocates the netdev and per-CPU stats, configures the hardware
 * (system port mapping, SWID, speed, MTU, admin state, STP state, MAC
 * learning) and finally registers the netdev. Errors unwind through the
 * goto chain in reverse order of acquisition. Returns 0 on success and
 * also (silently) when the port has no module and is skipped.
 */
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	bool usable;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sx_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sx_port->local_port);
		/* Not an error: free resources and report success. */
		goto port_not_usable;
	}

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
				      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; opened via ndo_open. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_admin_status_set:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1086
/* Tear down a single port created by mlxsw_sx_port_create().
 * Safe to call for ports that were never instantiated (entry is NULL,
 * e.g. the module was not usable or creation failed earlier).
 */
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	if (!mlxsw_sx_port)
		return;
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	/* Detach the port from its switch partition before freeing. */
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	/* mlxsw_sx_port itself is netdev_priv(dev), freed along with dev. */
	free_netdev(mlxsw_sx_port->dev);
}
1098
1099static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1100{
1101 int i;
1102
1103 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1104 mlxsw_sx_port_remove(mlxsw_sx, i);
1105 kfree(mlxsw_sx->ports);
1106}
1107
1108static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1109{
1110 size_t alloc_size;
1111 int i;
1112 int err;
1113
1114 alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
1115 mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1116 if (!mlxsw_sx->ports)
1117 return -ENOMEM;
1118
1119 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1120 err = mlxsw_sx_port_create(mlxsw_sx, i);
1121 if (err)
1122 goto err_port_create;
1123 }
1124 return 0;
1125
1126err_port_create:
1127 for (i--; i >= 1; i--)
1128 mlxsw_sx_port_remove(mlxsw_sx, i);
1129 kfree(mlxsw_sx->ports);
1130 return err;
1131}
1132
1133static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1134 char *pude_pl, void *priv)
1135{
1136 struct mlxsw_sx *mlxsw_sx = priv;
1137 struct mlxsw_sx_port *mlxsw_sx_port;
1138 enum mlxsw_reg_pude_oper_status status;
1139 u8 local_port;
1140
1141 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1142 mlxsw_sx_port = mlxsw_sx->ports[local_port];
1143 if (!mlxsw_sx_port) {
1144 dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1145 local_port);
1146 return;
1147 }
1148
1149 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1150 if (MLXSW_PORT_OPER_STATUS_UP == status) {
1151 netdev_info(mlxsw_sx_port->dev, "link up\n");
1152 netif_carrier_on(mlxsw_sx_port->dev);
1153 } else {
1154 netdev_info(mlxsw_sx_port->dev, "link down\n");
1155 netif_carrier_off(mlxsw_sx_port->dev);
1156 }
1157}
1158
/* Event listener for port up/down (PUDE) traps, registered with the
 * mlxsw core in mlxsw_sx_event_register().
 */
static struct mlxsw_event_listener mlxsw_sx_pude_event = {
	.func = mlxsw_sx_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1163
1164static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
1165 enum mlxsw_event_trap_id trap_id)
1166{
1167 struct mlxsw_event_listener *el;
1168 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1169 int err;
1170
1171 switch (trap_id) {
1172 case MLXSW_TRAP_ID_PUDE:
1173 el = &mlxsw_sx_pude_event;
1174 break;
1175 }
1176 err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
1177 if (err)
1178 return err;
1179
f24af330 1180 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
31557f0f
JP
1181 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1182 if (err)
1183 goto err_event_trap_set;
1184
1185 return 0;
1186
1187err_event_trap_set:
1188 mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1189 return err;
1190}
1191
1192static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
1193 enum mlxsw_event_trap_id trap_id)
1194{
1195 struct mlxsw_event_listener *el;
1196
1197 switch (trap_id) {
1198 case MLXSW_TRAP_ID_PUDE:
1199 el = &mlxsw_sx_pude_event;
1200 break;
1201 }
1202 mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1203}
1204
/* Receive path for trapped packets: attribute the skb to the ingress
 * port's netdev, account it in per-CPU stats and hand it to the stack.
 */
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	/* Account the full frame length here, before eth_type_trans()
	 * below pulls the Ethernet header and shrinks skb->len.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1229
/* Trap IDs whose packets are delivered to the CPU through
 * mlxsw_sx_rx_listener_func(), regardless of ingress port.
 */
static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
1303
/* Set up trap groups and register an RX listener plus a trap-to-CPU
 * policy for every trap in mlxsw_sx_rx_listener[].
 *
 * On partial failure, already-configured traps are restored to the
 * FORWARD action and their listeners unregistered, in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	/* Create the RX trap group. */
	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	/* Create the control trap group. */
	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
		/* Register the listener first so no trapped packet can
		 * arrive without a handler.
		 */
		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
						      &mlxsw_sx_rx_listener[i],
						      mlxsw_sx);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sx_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i registered its listener but failed the HPKT write;
	 * drop its listener before unwinding the fully-configured ones.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
					  &mlxsw_sx_rx_listener[i],
					  mlxsw_sx);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sx_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
						  &mlxsw_sx_rx_listener[i],
						  mlxsw_sx);
	}
	return err;
}
1352
/* Undo mlxsw_sx_traps_init(): restore the FORWARD action for each trap
 * and unregister its RX listener.
 */
static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
		/* Stop trapping to CPU before removing the handler. */
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sx_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
						  &mlxsw_sx_rx_listener[i],
						  mlxsw_sx);
	}
}
1368
1369static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1370{
1371 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1372 char sgcr_pl[MLXSW_REG_SGCR_LEN];
31557f0f
JP
1373 char *sftr_pl;
1374 int err;
1375
31557f0f
JP
1376 /* Configure a flooding table, which includes only CPU port. */
1377 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1378 if (!sftr_pl)
1379 return -ENOMEM;
bc2055f8
IS
1380 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1381 MLXSW_PORT_CPU_PORT, true);
31557f0f
JP
1382 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1383 kfree(sftr_pl);
1384 if (err)
1385 return err;
1386
1387 /* Flood different packet types using the flooding table. */
1388 mlxsw_reg_sfgc_pack(sfgc_pl,
1389 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1390 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1391 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1392 0);
1393 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1394 if (err)
1395 return err;
1396
1397 mlxsw_reg_sfgc_pack(sfgc_pl,
1398 MLXSW_REG_SFGC_TYPE_BROADCAST,
1399 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1400 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1401 0);
1402 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1403 if (err)
1404 return err;
1405
1406 mlxsw_reg_sfgc_pack(sfgc_pl,
1407 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1408 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1409 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1410 0);
1411 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1412 if (err)
1413 return err;
1414
1415 mlxsw_reg_sfgc_pack(sfgc_pl,
1416 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1417 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1418 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1419 0);
1420 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1421 if (err)
1422 return err;
1423
1424 mlxsw_reg_sfgc_pack(sfgc_pl,
1425 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1426 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1427 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1428 0);
1429 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1430 if (err)
1431 return err;
1432
1433 mlxsw_reg_sgcr_pack(sgcr_pl, true);
1434 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1435}
1436
/* Driver init callback: bind the device, read its HW ID, create ports
 * and set up events, traps and flooding.
 *
 * Error handling uses goto-based unwind; each label undoes the steps
 * completed before the failing one, in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_rx_listener_register:
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
1486
/* Driver fini callback: tear down in the reverse order of
 * mlxsw_sx_init().
 */
static void mlxsw_sx_fini(void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
1495
/* Device resource profile pushed to firmware by the mlxsw core at
 * init time. Each used_* flag marks the paired max_* field as valid.
 */
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = 64,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = 16,
	.used_max_mid = 1,
	.max_mid = 7000,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 48000,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.max_flood_tables = 2,
	.max_vid_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	/* InfiniBand resources are unused; this is an Ethernet driver. */
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1529
/* Driver descriptor registered with the mlxsw core; the core invokes
 * the init/fini callbacks when a matching SwitchX-2 device appears.
 */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind = MLXSW_DEVICE_KIND_SWITCHX2,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sx),
	.init = mlxsw_sx_init,
	.fini = mlxsw_sx_fini,
	.txhdr_construct = mlxsw_sx_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sx_config_profile,
};
1540
/* Module entry point: register the driver with the mlxsw core. */
static int __init mlxsw_sx_module_init(void)
{
	return mlxsw_core_driver_register(&mlxsw_sx_driver);
}
1545
/* Module exit point: unregister the driver from the mlxsw core. */
static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
1550
1551module_init(mlxsw_sx_module_init);
1552module_exit(mlxsw_sx_module_exit);
1553
1554MODULE_LICENSE("Dual BSD/GPL");
1555MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1556MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
1557MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);