1/*
2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/slab.h>
43#include <linux/device.h>
44#include <linux/skbuff.h>
45#include <linux/if_vlan.h>
46#include <net/switchdev.h>
47#include <generated/utsrelease.h>
48
49#include "core.h"
50#include "reg.h"
51#include "port.h"
52#include "trap.h"
53#include "txheader.h"
54
55static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
56static const char mlxsw_sx_driver_version[] = "1.0";
57
58struct mlxsw_sx_port;
59
60struct mlxsw_sx {
61 struct mlxsw_sx_port **ports;
62 struct mlxsw_core *core;
63 const struct mlxsw_bus_info *bus_info;
64 u8 hw_id[ETH_ALEN];
65};
66
67struct mlxsw_sx_port_pcpu_stats {
68 u64 rx_packets;
69 u64 rx_bytes;
70 u64 tx_packets;
71 u64 tx_bytes;
72 struct u64_stats_sync syncp;
73 u32 tx_dropped;
74};
75
76struct mlxsw_sx_port {
77 struct net_device *dev;
78 struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
79 struct mlxsw_sx *mlxsw_sx;
80 u8 local_port;
81};
82
83/* tx_hdr_version
84 * Tx header version.
85 * Must be set to 0.
86 */
87MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
88
89/* tx_hdr_ctl
90 * Packet control type.
91 * 0 - Ethernet control (e.g. EMADs, LACP)
92 * 1 - Ethernet data
93 */
94MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
95
96/* tx_hdr_proto
97 * Packet protocol type. Must be set to 1 (Ethernet).
98 */
99MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
100
101/* tx_hdr_etclass
102 * Egress TClass to be used on the egress device on the egress port.
103 * The MSB is specified in the 'ctclass3' field.
104 * Range is 0-15, where 15 is the highest priority.
105 */
106MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
107
108/* tx_hdr_swid
109 * Switch partition ID.
110 */
111MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
112
113/* tx_hdr_port_mid
114 * Destination local port for unicast packets.
115 * Destination multicast ID for multicast packets.
116 *
117 * Control packets are directed to a specific egress port, while data
118 * packets are transmitted through the CPU port (0) into the switch partition,
119 * where forwarding rules are applied.
120 */
121MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
122
123/* tx_hdr_ctclass3
124 * See field 'etclass'.
125 */
126MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
127
128/* tx_hdr_rdq
129 * RDQ for control packets sent to remote CPU.
130 * Must be set to 0x1F for EMADs, otherwise 0.
131 */
132MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
133
134/* tx_hdr_cpu_sig
135 * Signature control for packets going to CPU. Must be set to 0.
136 */
137MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
138
139/* tx_hdr_sig
140 * Stacking protocol signature. Must be set to 0xE0E0.
141 */
142MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
143
144/* tx_hdr_stclass
145 * Stacking TClass.
146 */
147MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
148
149/* tx_hdr_emad
150 * EMAD bit. Must be set for EMADs.
151 */
152MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
153
154/* tx_hdr_type
155 * 0 - Data packets
156 * 6 - Control packets
157 */
158MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159
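/* Editorial sketch (not part of the original file): each MLXSW_ITEM32(tx, hdr,
 * <field>, <byte offset>, <lsb>, <width>) definition above is expected to expand
 * into mlxsw_tx_hdr_<field>_get()/_set() helpers that read-modify-write one
 * big-endian 32-bit word of the Tx header (the real helpers come from the
 * driver's item.h). The hypothetical function below only illustrates what a
 * setter such as mlxsw_tx_hdr_type_set() (offset 0x0C, LSB 0, width 4) does.
 */
#if 0	/* illustration only, never compiled */
static void sketch_tx_hdr_type_set(char *txhdr, u32 val)
{
	__be32 *p = (__be32 *)(txhdr + 0x0C);	/* word holding the field */
	u32 word = be32_to_cpu(*p);
	u32 mask = GENMASK(3, 0);		/* width 4, starting at bit 0 */

	word = (word & ~mask) | ((val << 0) & mask);
	*p = cpu_to_be32(word);
}
#endif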
160static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
161 const struct mlxsw_tx_info *tx_info)
162{
163 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
164 bool is_emad = tx_info->is_emad;
165
166 memset(txhdr, 0, MLXSW_TXHDR_LEN);
167
168 /* We currently set default values for the egress tclass (QoS). */
169 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
170 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
171 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
172 mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
173 MLXSW_TXHDR_ETCLASS_5);
174 mlxsw_tx_hdr_swid_set(txhdr, 0);
175 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
176 mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
177 mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
178 MLXSW_TXHDR_RDQ_OTHER);
179 mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
180 mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
181 mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
182 mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
183 MLXSW_TXHDR_NOT_EMAD);
184 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
185}
186
187static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
188 bool is_up)
189{
190 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
191 char paos_pl[MLXSW_REG_PAOS_LEN];
192
193 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
194 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
195 MLXSW_PORT_ADMIN_STATUS_DOWN);
196 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
197}
198
199static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
200 bool *p_is_up)
201{
202 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
203 char paos_pl[MLXSW_REG_PAOS_LEN];
204 u8 oper_status;
205 int err;
206
207 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
208 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
209 if (err)
210 return err;
211 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
212 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
213 return 0;
214}
215
216static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
217{
218 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
219 char pmtu_pl[MLXSW_REG_PMTU_LEN];
220 int max_mtu;
221 int err;
222
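	/* The MTU programmed via PMTU covers the Tx header and the Ethernet
	 * header as well, so add them before checking against the maximum
	 * reported by the device and writing the new value.
	 */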
223 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
224 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
225 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
226 if (err)
227 return err;
228 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
229
230 if (mtu > max_mtu)
231 return -EINVAL;
232
233 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
234 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
235}
236
237static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
238{
239 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
240 char pspa_pl[MLXSW_REG_PSPA_LEN];
241
242 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
243 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
244}
245
246static int
247mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
248{
249 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
250 char sspr_pl[MLXSW_REG_SSPR_LEN];
251
252 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
253 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
254}
255
256static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
257 bool *p_usable)
258{
259 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
260 char pmlp_pl[MLXSW_REG_PMLP_LEN];
261 int err;
262
263 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
264 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
265 if (err)
266 return err;
267 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
268 return 0;
269}
270
271static int mlxsw_sx_port_open(struct net_device *dev)
272{
273 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
274 int err;
275
276 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
277 if (err)
278 return err;
279 netif_start_queue(dev);
280 return 0;
281}
282
283static int mlxsw_sx_port_stop(struct net_device *dev)
284{
285 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
286
287 netif_stop_queue(dev);
288 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
289}
290
291static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
292 struct net_device *dev)
293{
294 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
295 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
296 struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
297 const struct mlxsw_tx_info tx_info = {
298 .local_port = mlxsw_sx_port->local_port,
299 .is_emad = false,
300 };
301 u64 len;
302 int err;
303
304 if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
305 return NETDEV_TX_BUSY;
306
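	/* mlxsw_sx_txhdr_construct() will push MLXSW_TXHDR_LEN bytes in front
	 * of the frame; if the skb does not have that much headroom, reallocate
	 * it, dropping the packet if the reallocation fails.
	 */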
307 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
308 struct sk_buff *skb_orig = skb;
309
310 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
311 if (!skb) {
312 this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
313 dev_kfree_skb_any(skb_orig);
314 return NETDEV_TX_OK;
315 }
316 dev_consume_skb_any(skb_orig);
317 }
318 mlxsw_sx_txhdr_construct(skb, &tx_info);
319 len = skb->len;
320 /* Due to a race we might fail here because of a full queue. In that
321 * unlikely case we simply drop the packet.
322 */
323 err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
324
325 if (!err) {
326 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
327 u64_stats_update_begin(&pcpu_stats->syncp);
328 pcpu_stats->tx_packets++;
329 pcpu_stats->tx_bytes += len;
330 u64_stats_update_end(&pcpu_stats->syncp);
331 } else {
332 this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
333 dev_kfree_skb_any(skb);
334 }
335 return NETDEV_TX_OK;
336}
337
338static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
339{
340 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
341 int err;
342
343 err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
344 if (err)
345 return err;
346 dev->mtu = mtu;
347 return 0;
348}
349
350static struct rtnl_link_stats64 *
351mlxsw_sx_port_get_stats64(struct net_device *dev,
352 struct rtnl_link_stats64 *stats)
353{
354 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
355 struct mlxsw_sx_port_pcpu_stats *p;
356 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
357 u32 tx_dropped = 0;
358 unsigned int start;
359 int i;
360
361 for_each_possible_cpu(i) {
362 p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
363 do {
364 start = u64_stats_fetch_begin_irq(&p->syncp);
365 rx_packets = p->rx_packets;
366 rx_bytes = p->rx_bytes;
367 tx_packets = p->tx_packets;
368 tx_bytes = p->tx_bytes;
369 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
370
371 stats->rx_packets += rx_packets;
372 stats->rx_bytes += rx_bytes;
373 stats->tx_packets += tx_packets;
374 stats->tx_bytes += tx_bytes;
375 /* tx_dropped is u32, updated without syncp protection. */
376 tx_dropped += p->tx_dropped;
377 }
378 stats->tx_dropped = tx_dropped;
379 return stats;
380}
381
382static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
383 .ndo_open = mlxsw_sx_port_open,
384 .ndo_stop = mlxsw_sx_port_stop,
385 .ndo_start_xmit = mlxsw_sx_port_xmit,
386 .ndo_change_mtu = mlxsw_sx_port_change_mtu,
387 .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
388};
389
390static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
391 struct ethtool_drvinfo *drvinfo)
392{
393 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
394 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
395
396 strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
397 strlcpy(drvinfo->version, mlxsw_sx_driver_version,
398 sizeof(drvinfo->version));
399 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
400 "%d.%d.%d",
401 mlxsw_sx->bus_info->fw_rev.major,
402 mlxsw_sx->bus_info->fw_rev.minor,
403 mlxsw_sx->bus_info->fw_rev.subminor);
404 strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
405 sizeof(drvinfo->bus_info));
406}
407
408struct mlxsw_sx_port_hw_stats {
409 char str[ETH_GSTRING_LEN];
410 u64 (*getter)(char *payload);
411};
412
413static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
414 {
415 .str = "a_frames_transmitted_ok",
416 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
417 },
418 {
419 .str = "a_frames_received_ok",
420 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
421 },
422 {
423 .str = "a_frame_check_sequence_errors",
424 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
425 },
426 {
427 .str = "a_alignment_errors",
428 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
429 },
430 {
431 .str = "a_octets_transmitted_ok",
432 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
433 },
434 {
435 .str = "a_octets_received_ok",
436 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
437 },
438 {
439 .str = "a_multicast_frames_xmitted_ok",
440 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
441 },
442 {
443 .str = "a_broadcast_frames_xmitted_ok",
444 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
445 },
446 {
447 .str = "a_multicast_frames_received_ok",
448 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
449 },
450 {
451 .str = "a_broadcast_frames_received_ok",
452 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
453 },
454 {
455 .str = "a_in_range_length_errors",
456 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
457 },
458 {
459 .str = "a_out_of_range_length_field",
460 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
461 },
462 {
463 .str = "a_frame_too_long_errors",
464 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
465 },
466 {
467 .str = "a_symbol_error_during_carrier",
468 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
469 },
470 {
471 .str = "a_mac_control_frames_transmitted",
472 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
473 },
474 {
475 .str = "a_mac_control_frames_received",
476 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
477 },
478 {
479 .str = "a_unsupported_opcodes_received",
480 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
481 },
482 {
483 .str = "a_pause_mac_ctrl_frames_received",
484 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
485 },
486 {
487 .str = "a_pause_mac_ctrl_frames_xmitted",
488 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
489 },
490};
491
492#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
493
494static void mlxsw_sx_port_get_strings(struct net_device *dev,
495 u32 stringset, u8 *data)
496{
497 u8 *p = data;
498 int i;
499
500 switch (stringset) {
501 case ETH_SS_STATS:
502 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
503 memcpy(p, mlxsw_sx_port_hw_stats[i].str,
504 ETH_GSTRING_LEN);
505 p += ETH_GSTRING_LEN;
506 }
507 break;
508 }
509}
510
511static void mlxsw_sx_port_get_stats(struct net_device *dev,
512 struct ethtool_stats *stats, u64 *data)
513{
514 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
515 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
516 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
517 int i;
518 int err;
519
520 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
521 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
522 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
523 data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
524}
525
526static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
527{
528 switch (sset) {
529 case ETH_SS_STATS:
530 return MLXSW_SX_PORT_HW_STATS_LEN;
531 default:
532 return -EOPNOTSUPP;
533 }
534}
535
536struct mlxsw_sx_port_link_mode {
537 u32 mask;
538 u32 supported;
539 u32 advertised;
540 u32 speed;
541};
542
543static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
544 {
545 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
546 .supported = SUPPORTED_100baseT_Full,
547 .advertised = ADVERTISED_100baseT_Full,
548 .speed = 100,
549 },
550 {
551 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
552 .speed = 100,
553 },
554 {
555 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
556 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
557 .supported = SUPPORTED_1000baseKX_Full,
558 .advertised = ADVERTISED_1000baseKX_Full,
559 .speed = 1000,
560 },
561 {
562 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
563 .supported = SUPPORTED_10000baseT_Full,
564 .advertised = ADVERTISED_10000baseT_Full,
565 .speed = 10000,
566 },
567 {
568 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
569 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
570 .supported = SUPPORTED_10000baseKX4_Full,
571 .advertised = ADVERTISED_10000baseKX4_Full,
572 .speed = 10000,
573 },
574 {
575 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
576 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
577 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
578 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
579 .supported = SUPPORTED_10000baseKR_Full,
580 .advertised = ADVERTISED_10000baseKR_Full,
581 .speed = 10000,
582 },
583 {
584 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
585 .supported = SUPPORTED_20000baseKR2_Full,
586 .advertised = ADVERTISED_20000baseKR2_Full,
587 .speed = 20000,
588 },
589 {
590 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
591 .supported = SUPPORTED_40000baseCR4_Full,
592 .advertised = ADVERTISED_40000baseCR4_Full,
593 .speed = 40000,
594 },
595 {
596 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
597 .supported = SUPPORTED_40000baseKR4_Full,
598 .advertised = ADVERTISED_40000baseKR4_Full,
599 .speed = 40000,
600 },
601 {
602 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
603 .supported = SUPPORTED_40000baseSR4_Full,
604 .advertised = ADVERTISED_40000baseSR4_Full,
605 .speed = 40000,
606 },
607 {
608 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
609 .supported = SUPPORTED_40000baseLR4_Full,
610 .advertised = ADVERTISED_40000baseLR4_Full,
611 .speed = 40000,
612 },
613 {
614 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
615 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
616 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
617 .speed = 25000,
618 },
619 {
620 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
621 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
622 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
623 .speed = 50000,
624 },
625 {
626 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
627 .supported = SUPPORTED_56000baseKR4_Full,
628 .advertised = ADVERTISED_56000baseKR4_Full,
629 .speed = 56000,
630 },
631 {
632 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
633 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
634 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
635 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
636 .speed = 100000,
637 },
638};
639
640#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
641
642static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
643{
644 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
645 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
646 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
647 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
648 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
649 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
650 return SUPPORTED_FIBRE;
651
652 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
653 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
654 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
655 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
656 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
657 return SUPPORTED_Backplane;
658 return 0;
659}
660
661static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
662{
663 u32 modes = 0;
664 int i;
665
666 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
667 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
668 modes |= mlxsw_sx_port_link_mode[i].supported;
669 }
670 return modes;
671}
672
673static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
674{
675 u32 modes = 0;
676 int i;
677
678 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
679 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
680 modes |= mlxsw_sx_port_link_mode[i].advertised;
681 }
682 return modes;
683}
684
685static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
686 struct ethtool_cmd *cmd)
687{
688 u32 speed = SPEED_UNKNOWN;
689 u8 duplex = DUPLEX_UNKNOWN;
690 int i;
691
692 if (!carrier_ok)
693 goto out;
694
695 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
696 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
697 speed = mlxsw_sx_port_link_mode[i].speed;
698 duplex = DUPLEX_FULL;
699 break;
700 }
701 }
702out:
703 ethtool_cmd_speed_set(cmd, speed);
704 cmd->duplex = duplex;
705}
706
707static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
708{
709 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
710 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
711 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
712 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
713 return PORT_FIBRE;
714
715 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
716 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
717 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
718 return PORT_DA;
719
720 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
721 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
722 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
723 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
724 return PORT_NONE;
725
726 return PORT_OTHER;
727}
728
729static int mlxsw_sx_port_get_settings(struct net_device *dev,
730 struct ethtool_cmd *cmd)
731{
732 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
733 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
734 char ptys_pl[MLXSW_REG_PTYS_LEN];
735 u32 eth_proto_cap;
736 u32 eth_proto_admin;
737 u32 eth_proto_oper;
738 int err;
739
740 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
741 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
742 if (err) {
743 netdev_err(dev, "Failed to get proto");
744 return err;
745 }
746 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
747 &eth_proto_admin, &eth_proto_oper);
748
749 cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
750 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
751 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
752 cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
753 mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
754 eth_proto_oper, cmd);
755
756 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
757 cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
758 cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
759
760 cmd->transceiver = XCVR_INTERNAL;
761 return 0;
762}
763
764static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
765{
766 u32 ptys_proto = 0;
767 int i;
768
769 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
770 if (advertising & mlxsw_sx_port_link_mode[i].advertised)
771 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
772 }
773 return ptys_proto;
774}
775
776static u32 mlxsw_sx_to_ptys_speed(u32 speed)
777{
778 u32 ptys_proto = 0;
779 int i;
780
781 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
782 if (speed == mlxsw_sx_port_link_mode[i].speed)
783 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
784 }
785 return ptys_proto;
786}
787
788static int mlxsw_sx_port_set_settings(struct net_device *dev,
789 struct ethtool_cmd *cmd)
790{
791 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
792 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
793 char ptys_pl[MLXSW_REG_PTYS_LEN];
794 u32 speed;
795 u32 eth_proto_new;
796 u32 eth_proto_cap;
797 u32 eth_proto_admin;
798 bool is_up;
799 int err;
800
801 speed = ethtool_cmd_speed(cmd);
802
803 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
804 mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
805 mlxsw_sx_to_ptys_speed(speed);
806
807 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
808 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
809 if (err) {
810 netdev_err(dev, "Failed to get proto");
811 return err;
812 }
813 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
814
815 eth_proto_new = eth_proto_new & eth_proto_cap;
816 if (!eth_proto_new) {
817 netdev_err(dev, "Not supported proto admin requested");
818 return -EINVAL;
819 }
820 if (eth_proto_new == eth_proto_admin)
821 return 0;
822
823 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
824 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
825 if (err) {
826 netdev_err(dev, "Failed to set proto admin");
827 return err;
828 }
829
830 err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
831 if (err) {
832 netdev_err(dev, "Failed to get oper status");
833 return err;
834 }
835 if (!is_up)
836 return 0;
837
838 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
839 if (err) {
840 netdev_err(dev, "Failed to set admin status");
841 return err;
842 }
843
844 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
845 if (err) {
846 netdev_err(dev, "Failed to set admin status");
847 return err;
848 }
849
850 return 0;
851}
852
853static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
854 .get_drvinfo = mlxsw_sx_port_get_drvinfo,
855 .get_link = ethtool_op_get_link,
856 .get_strings = mlxsw_sx_port_get_strings,
857 .get_ethtool_stats = mlxsw_sx_port_get_stats,
858 .get_sset_count = mlxsw_sx_port_get_sset_count,
859 .get_settings = mlxsw_sx_port_get_settings,
860 .set_settings = mlxsw_sx_port_set_settings,
861};
862
863static int mlxsw_sx_port_attr_get(struct net_device *dev,
864 struct switchdev_attr *attr)
865{
866 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
867 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
868
869 switch (attr->id) {
870 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
871 attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
872 memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
873 break;
874 default:
875 return -EOPNOTSUPP;
876 }
877
878 return 0;
879}
880
881static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
882 .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
883};
884
885static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
886{
887 char spad_pl[MLXSW_REG_SPAD_LEN];
888 int err;
889
890 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
891 if (err)
892 return err;
893 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
894 return 0;
895}
896
897static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
898{
899 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
900 struct net_device *dev = mlxsw_sx_port->dev;
901 char ppad_pl[MLXSW_REG_PPAD_LEN];
902 int err;
903
904 mlxsw_reg_ppad_pack(ppad_pl, false, 0);
905 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
906 if (err)
907 return err;
908 mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
909 /* The last byte value in base mac address is guaranteed
910 * to be such it does not overflow when adding local_port
911 * value.
912 */
913 dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
914 return 0;
915}
916
917static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
918 u16 vid, enum mlxsw_reg_spms_state state)
919{
920 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
921 char *spms_pl;
922 int err;
923
924 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
925 if (!spms_pl)
926 return -ENOMEM;
927 mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
928 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
929 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
930 kfree(spms_pl);
931 return err;
932}
933
934static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
935 u32 speed)
936{
937 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
938 char ptys_pl[MLXSW_REG_PTYS_LEN];
939
940 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
941 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
942}
943
944static int
945mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
946 enum mlxsw_reg_spmlr_learn_mode mode)
947{
948 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
949 char spmlr_pl[MLXSW_REG_SPMLR_LEN];
950
951 mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
952 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
953}
954
955static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
956{
957 struct mlxsw_sx_port *mlxsw_sx_port;
958 struct net_device *dev;
959 bool usable;
960 int err;
961
962 dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
963 if (!dev)
964 return -ENOMEM;
965 mlxsw_sx_port = netdev_priv(dev);
966 mlxsw_sx_port->dev = dev;
967 mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
968 mlxsw_sx_port->local_port = local_port;
969
970 mlxsw_sx_port->pcpu_stats =
971 netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
972 if (!mlxsw_sx_port->pcpu_stats) {
973 err = -ENOMEM;
974 goto err_alloc_stats;
975 }
976
977 dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
978 dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
979 dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
980
981 err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
982 if (err) {
983 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
984 mlxsw_sx_port->local_port);
985 goto err_dev_addr_get;
986 }
987
988 netif_carrier_off(dev);
989
990 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
991 NETIF_F_VLAN_CHALLENGED;
992
993 /* Each packet needs to have a Tx header (metadata) on top of all other
994 * headers.
995 */
996 dev->hard_header_len += MLXSW_TXHDR_LEN;
997
998 err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
999 if (err) {
1000 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
1001 mlxsw_sx_port->local_port);
1002 goto err_port_module_check;
1003 }
1004
1005 if (!usable) {
1006 dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
1007 mlxsw_sx_port->local_port);
1008 goto port_not_usable;
1009 }
1010
1011 err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1012 if (err) {
1013 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1014 mlxsw_sx_port->local_port);
1015 goto err_port_system_port_mapping_set;
1016 }
1017
1018 err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
1019 if (err) {
1020 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1021 mlxsw_sx_port->local_port);
1022 goto err_port_swid_set;
1023 }
1024
1025 err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
1026 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
1027 if (err) {
1028 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1029 mlxsw_sx_port->local_port);
1030 goto err_port_speed_set;
1031 }
1032
1033 err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
1034 if (err) {
1035 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1036 mlxsw_sx_port->local_port);
1037 goto err_port_mtu_set;
1038 }
1039
1040 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1041 if (err)
1042 goto err_port_admin_status_set;
1043
1044 err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
1045 MLXSW_PORT_DEFAULT_VID,
1046 MLXSW_REG_SPMS_STATE_FORWARDING);
1047 if (err) {
1048 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
1049 mlxsw_sx_port->local_port);
1050 goto err_port_stp_state_set;
1051 }
1052
1053 err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
1054 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
1055 if (err) {
1056 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
1057 mlxsw_sx_port->local_port);
1058 goto err_port_mac_learning_mode_set;
1059 }
1060
1061 err = register_netdev(dev);
1062 if (err) {
1063 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
1064 mlxsw_sx_port->local_port);
1065 goto err_register_netdev;
1066 }
1067
1068 mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1069 return 0;
1070
1071err_register_netdev:
1072err_port_mac_learning_mode_set:
1073err_port_stp_state_set:
1074err_port_admin_status_set:
1075err_port_mtu_set:
1076err_port_speed_set:
1077err_port_swid_set:
1078err_port_system_port_mapping_set:
1079port_not_usable:
1080err_port_module_check:
1081err_dev_addr_get:
1082 free_percpu(mlxsw_sx_port->pcpu_stats);
1083err_alloc_stats:
1084 free_netdev(dev);
1085 return err;
1086}
1087
1088static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1089{
1090 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1091
1092 if (!mlxsw_sx_port)
1093 return;
1094 unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1095 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1096 free_percpu(mlxsw_sx_port->pcpu_stats);
1097 free_netdev(mlxsw_sx_port->dev);
1098}
1099
1100static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1101{
1102 int i;
1103
1104 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1105 mlxsw_sx_port_remove(mlxsw_sx, i);
1106 kfree(mlxsw_sx->ports);
1107}
1108
1109static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1110{
1111 size_t alloc_size;
1112 int i;
1113 int err;
1114
1115 alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
1116 mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1117 if (!mlxsw_sx->ports)
1118 return -ENOMEM;
1119
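	/* Local port 0 is the CPU port, so front-panel ports are created
	 * starting at index 1.
	 */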
1120 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1121 err = mlxsw_sx_port_create(mlxsw_sx, i);
1122 if (err)
1123 goto err_port_create;
1124 }
1125 return 0;
1126
1127err_port_create:
1128 for (i--; i >= 1; i--)
1129 mlxsw_sx_port_remove(mlxsw_sx, i);
1130 kfree(mlxsw_sx->ports);
1131 return err;
1132}
1133
1134static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1135 char *pude_pl, void *priv)
1136{
1137 struct mlxsw_sx *mlxsw_sx = priv;
1138 struct mlxsw_sx_port *mlxsw_sx_port;
1139 enum mlxsw_reg_pude_oper_status status;
1140 u8 local_port;
1141
1142 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1143 mlxsw_sx_port = mlxsw_sx->ports[local_port];
1144 if (!mlxsw_sx_port) {
1145 dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1146 local_port);
1147 return;
1148 }
1149
1150 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1151 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1152 netdev_info(mlxsw_sx_port->dev, "link up\n");
1153 netif_carrier_on(mlxsw_sx_port->dev);
1154 } else {
1155 netdev_info(mlxsw_sx_port->dev, "link down\n");
1156 netif_carrier_off(mlxsw_sx_port->dev);
1157 }
1158}
1159
1160static struct mlxsw_event_listener mlxsw_sx_pude_event = {
1161 .func = mlxsw_sx_pude_event_func,
1162 .trap_id = MLXSW_TRAP_ID_PUDE,
1163};
1164
1165static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
1166 enum mlxsw_event_trap_id trap_id)
1167{
1168 struct mlxsw_event_listener *el;
1169 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1170 int err;
1171
1172 switch (trap_id) {
1173 case MLXSW_TRAP_ID_PUDE:
1174 el = &mlxsw_sx_pude_event;
1175 break;
1176 }
1177 err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
1178 if (err)
1179 return err;
1180
1181 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1182 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1183 if (err)
1184 goto err_event_trap_set;
1185
1186 return 0;
1187
1188err_event_trap_set:
1189 mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1190 return err;
1191}
1192
1193static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
1194 enum mlxsw_event_trap_id trap_id)
1195{
1196 struct mlxsw_event_listener *el;
1197
1198 switch (trap_id) {
1199 case MLXSW_TRAP_ID_PUDE:
1200 el = &mlxsw_sx_pude_event;
1201 break;
1202 }
1203 mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1204}
1205
1206static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
1207 void *priv)
1208{
1209 struct mlxsw_sx *mlxsw_sx = priv;
1210 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1211 struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
1212
1213 if (unlikely(!mlxsw_sx_port)) {
1214 dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
1215 local_port);
1216 return;
1217 }
1218
1219 skb->dev = mlxsw_sx_port->dev;
1220
1221 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
1222 u64_stats_update_begin(&pcpu_stats->syncp);
1223 pcpu_stats->rx_packets++;
1224 pcpu_stats->rx_bytes += skb->len;
1225 u64_stats_update_end(&pcpu_stats->syncp);
1226
1227 skb->protocol = eth_type_trans(skb, skb->dev);
1228 netif_receive_skb(skb);
1229}
1230
1231static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
1232 {
1233 .func = mlxsw_sx_rx_listener_func,
1234 .local_port = MLXSW_PORT_DONT_CARE,
1235 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1236 },
1237 /* Traps for specific L2 packet types, not trapped as FDB MC */
1238 {
1239 .func = mlxsw_sx_rx_listener_func,
1240 .local_port = MLXSW_PORT_DONT_CARE,
1241 .trap_id = MLXSW_TRAP_ID_STP,
1242 },
1243 {
1244 .func = mlxsw_sx_rx_listener_func,
1245 .local_port = MLXSW_PORT_DONT_CARE,
1246 .trap_id = MLXSW_TRAP_ID_LACP,
1247 },
1248 {
1249 .func = mlxsw_sx_rx_listener_func,
1250 .local_port = MLXSW_PORT_DONT_CARE,
1251 .trap_id = MLXSW_TRAP_ID_EAPOL,
1252 },
1253 {
1254 .func = mlxsw_sx_rx_listener_func,
1255 .local_port = MLXSW_PORT_DONT_CARE,
1256 .trap_id = MLXSW_TRAP_ID_LLDP,
1257 },
1258 {
1259 .func = mlxsw_sx_rx_listener_func,
1260 .local_port = MLXSW_PORT_DONT_CARE,
1261 .trap_id = MLXSW_TRAP_ID_MMRP,
1262 },
1263 {
1264 .func = mlxsw_sx_rx_listener_func,
1265 .local_port = MLXSW_PORT_DONT_CARE,
1266 .trap_id = MLXSW_TRAP_ID_MVRP,
1267 },
1268 {
1269 .func = mlxsw_sx_rx_listener_func,
1270 .local_port = MLXSW_PORT_DONT_CARE,
1271 .trap_id = MLXSW_TRAP_ID_RPVST,
1272 },
1273 {
1274 .func = mlxsw_sx_rx_listener_func,
1275 .local_port = MLXSW_PORT_DONT_CARE,
1276 .trap_id = MLXSW_TRAP_ID_DHCP,
1277 },
1278 {
1279 .func = mlxsw_sx_rx_listener_func,
1280 .local_port = MLXSW_PORT_DONT_CARE,
1281 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1282 },
1283 {
1284 .func = mlxsw_sx_rx_listener_func,
1285 .local_port = MLXSW_PORT_DONT_CARE,
1286 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1287 },
1288 {
1289 .func = mlxsw_sx_rx_listener_func,
1290 .local_port = MLXSW_PORT_DONT_CARE,
1291 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1292 },
1293 {
1294 .func = mlxsw_sx_rx_listener_func,
1295 .local_port = MLXSW_PORT_DONT_CARE,
1296 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1297 },
1298 {
1299 .func = mlxsw_sx_rx_listener_func,
1300 .local_port = MLXSW_PORT_DONT_CARE,
1301 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1302 },
1303};
1304
1305static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
1306{
1307 char htgt_pl[MLXSW_REG_HTGT_LEN];
1308 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1309 int i;
1310 int err;
1311
1312 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
1313 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1314 if (err)
1315 return err;
1316
1317 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
1318 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1319 if (err)
1320 return err;
1321
1322 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
1323 err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
1324 &mlxsw_sx_rx_listener[i],
1325 mlxsw_sx);
1326 if (err)
1327 goto err_rx_listener_register;
1328
1329 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
1330 mlxsw_sx_rx_listener[i].trap_id);
1331 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1332 if (err)
1333 goto err_rx_trap_set;
1334 }
1335 return 0;
1336
1337err_rx_trap_set:
1338 mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
1339 &mlxsw_sx_rx_listener[i],
1340 mlxsw_sx);
1341err_rx_listener_register:
1342 for (i--; i >= 0; i--) {
1343 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1344 mlxsw_sx_rx_listener[i].trap_id);
1345 mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1346
1347 mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
1348 &mlxsw_sx_rx_listener[i],
1349 mlxsw_sx);
1350 }
1351 return err;
1352}
1353
1354static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1355{
1356 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1357 int i;
1358
1359 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
1360 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1361 mlxsw_sx_rx_listener[i].trap_id);
1362 mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1363
1364 mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
1365 &mlxsw_sx_rx_listener[i],
1366 mlxsw_sx);
1367 }
1368}
1369
1370static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1371{
1372 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1373 char sgcr_pl[MLXSW_REG_SGCR_LEN];
1374 char *sftr_pl;
1375 int err;
1376
1377 /* Configure a flooding table, which includes only CPU port. */
1378 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1379 if (!sftr_pl)
1380 return -ENOMEM;
1381 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1382 MLXSW_PORT_CPU_PORT, true);
1383 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1384 kfree(sftr_pl);
1385 if (err)
1386 return err;
1387
1388 /* Flood different packet types using the flooding table. */
1389 mlxsw_reg_sfgc_pack(sfgc_pl,
1390 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1391 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1392 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1393 0);
1394 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1395 if (err)
1396 return err;
1397
1398 mlxsw_reg_sfgc_pack(sfgc_pl,
1399 MLXSW_REG_SFGC_TYPE_BROADCAST,
1400 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1401 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1402 0);
1403 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1404 if (err)
1405 return err;
1406
1407 mlxsw_reg_sfgc_pack(sfgc_pl,
1408 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1409 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1410 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1411 0);
1412 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1413 if (err)
1414 return err;
1415
1416 mlxsw_reg_sfgc_pack(sfgc_pl,
1417 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1418 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1419 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1420 0);
1421 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1422 if (err)
1423 return err;
1424
1425 mlxsw_reg_sfgc_pack(sfgc_pl,
1426 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1427 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1428 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1429 0);
1430 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1431 if (err)
1432 return err;
1433
1434 mlxsw_reg_sgcr_pack(sgcr_pl, true);
1435 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1436}
1437
1438static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
1439 const struct mlxsw_bus_info *mlxsw_bus_info)
1440{
1441 struct mlxsw_sx *mlxsw_sx = priv;
1442 int err;
1443
1444 mlxsw_sx->core = mlxsw_core;
1445 mlxsw_sx->bus_info = mlxsw_bus_info;
1446
1447 err = mlxsw_sx_hw_id_get(mlxsw_sx);
1448 if (err) {
1449 dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
1450 return err;
1451 }
1452
1453 err = mlxsw_sx_ports_create(mlxsw_sx);
1454 if (err) {
1455 dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
1456 return err;
1457 }
1458
1459 err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
1460 if (err) {
1461 dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
1462 goto err_event_register;
1463 }
1464
1465 err = mlxsw_sx_traps_init(mlxsw_sx);
1466 if (err) {
1467 dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
1468 goto err_rx_listener_register;
1469 }
1470
1471 err = mlxsw_sx_flood_init(mlxsw_sx);
1472 if (err) {
1473 dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
1474 goto err_flood_init;
1475 }
1476
1477 return 0;
1478
1479err_flood_init:
1480 mlxsw_sx_traps_fini(mlxsw_sx);
1481err_rx_listener_register:
1482 mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
1483err_event_register:
1484 mlxsw_sx_ports_remove(mlxsw_sx);
1485 return err;
1486}
1487
1488static void mlxsw_sx_fini(void *priv)
1489{
1490 struct mlxsw_sx *mlxsw_sx = priv;
1491
1492 mlxsw_sx_traps_fini(mlxsw_sx);
1493 mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
1494 mlxsw_sx_ports_remove(mlxsw_sx);
1495}
1496
1497static struct mlxsw_config_profile mlxsw_sx_config_profile = {
1498 .used_max_vepa_channels = 1,
1499 .max_vepa_channels = 0,
1500 .used_max_lag = 1,
1501 .max_lag = 64,
1502 .used_max_port_per_lag = 1,
1503 .max_port_per_lag = 16,
1504 .used_max_mid = 1,
1505 .max_mid = 7000,
1506 .used_max_pgt = 1,
1507 .max_pgt = 0,
1508 .used_max_system_port = 1,
1509 .max_system_port = 48000,
1510 .used_max_vlan_groups = 1,
1511 .max_vlan_groups = 127,
1512 .used_max_regions = 1,
1513 .max_regions = 400,
1514 .used_flood_tables = 1,
1515 .max_flood_tables = 2,
1516 .max_vid_flood_tables = 1,
1517 .used_flood_mode = 1,
1518 .flood_mode = 3,
1519 .used_max_ib_mc = 1,
1520 .max_ib_mc = 0,
1521 .used_max_pkey = 1,
1522 .max_pkey = 0,
1523 .swid_config = {
1524 {
1525 .used_type = 1,
1526 .type = MLXSW_PORT_SWID_TYPE_ETH,
1527 }
1528 },
1529};
1530
1531static struct mlxsw_driver mlxsw_sx_driver = {
1532 .kind = MLXSW_DEVICE_KIND_SWITCHX2,
1533 .owner = THIS_MODULE,
1534 .priv_size = sizeof(struct mlxsw_sx),
1535 .init = mlxsw_sx_init,
1536 .fini = mlxsw_sx_fini,
1537 .txhdr_construct = mlxsw_sx_txhdr_construct,
1538 .txhdr_len = MLXSW_TXHDR_LEN,
1539 .profile = &mlxsw_sx_config_profile,
1540};
1541
1542static int __init mlxsw_sx_module_init(void)
1543{
1544 return mlxsw_core_driver_register(&mlxsw_sx_driver);
1545}
1546
1547static void __exit mlxsw_sx_module_exit(void)
1548{
1549 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1550}
1551
1552module_init(mlxsw_sx_module_init);
1553module_exit(mlxsw_sx_module_exit);
1554
1555MODULE_LICENSE("Dual BSD/GPL");
1556MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1557MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
1558MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);