Merge branch 'bind_unbind' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / net / ethernet / rocker / rocker_ofdpa.c
1 /*
2 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
3 * implementation
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/spinlock.h>
16 #include <linux/hashtable.h>
17 #include <linux/crc32.h>
18 #include <linux/netdevice.h>
19 #include <linux/inetdevice.h>
20 #include <linux/if_vlan.h>
21 #include <linux/if_bridge.h>
22 #include <net/neighbour.h>
23 #include <net/switchdev.h>
24 #include <net/ip_fib.h>
25 #include <net/arp.h>
26
27 #include "rocker.h"
28 #include "rocker_tlv.h"
29
/* Hardware flow-table match key.  @tbl_id selects which arm of the
 * union is significant; the arms mirror the OF-DPA pipeline tables.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* higher priority match wins */
	enum rocker_of_dpa_table_id tbl_id;	/* selects the union arm below */
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;	/* VLAN pushed for untagged frames */
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;		/* IPv4 route destination */
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst[] is valid */
			int has_eth_dst_mask;	/* eth_dst_mask[] is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;		/* DSCP in low 6 bits, ECN in top 2 */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
94
/* One flow-table entry, hashed in ofdpa->flow_tbl by key_crc32. */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;		/* unique id handed to hardware */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;		/* significant bytes of key; 0 means whole key */
	u32 key_crc32; /* key */
	struct fib_info *fi;	/* set only for unicast-routing entries */
};
104
/* One group-table entry, hashed in ofdpa->group_tbl by group_id.
 * The union arm used is implied by ROCKER_GROUP_TYPE_GET(group_id).
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;	/* number of ids in group_ids[] (flood/mcast) */
	u32 *group_ids;		/* member group ids for flood/mcast groups */
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* next (lower) group in the chain */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* next (lower) group in the chain */
		} l3_unicast;
	};
};
130
/* Forwarding-database entry, hashed in ofdpa->fdb_tbl by key_crc32. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned from traffic vs. statically added */
	unsigned long touched;	/* jiffies of last refresh, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
142
/* Maps a netdev ifindex to a driver-internal VLAN id (refcounted). */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
149
/* Cached IPv4 neighbour, hashed in ofdpa->neigh_tbl by ip_addr. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
159
/* Indices into ofdpa_port->ctrls[]: classes of control traffic trapped
 * or flooded via ACL entries.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
169
/* Internal VLAN ids occupy 0x0f00..0x0ffe (255 ids) and are assigned to
 * ports/bridges that carry untagged traffic.  VID 0 means "untagged".
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
175
/* Per-switch driver state: the software shadow of the hardware flow,
 * group, FDB and neighbour tables, each with its own spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;		/* next unique flow cookie */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;	/* ages out learned FDB entries */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;			/* FIB offload gave up; software fallback */
};
195
/* Per-port driver state layered on top of the generic rocker port. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number on the switch */
	struct net_device *bridge_dev;	/* master device when enslaved, else NULL */
	__be16 internal_vlan_id;	/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* which ctrl-traffic ACLs are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
209
/* Well-known MAC addresses and masks used when building ACL/ctrl flows. */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
219
220 /* Rocker priority levels for flow table entries. Higher
221 * priority match takes precedence over lower priority match.
222 */
223
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,		/* placeholder; never a valid match */
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
240
241 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
242 {
243 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
244 u16 end = 0xffe;
245 u16 _vlan_id = ntohs(vlan_id);
246
247 return (_vlan_id >= start && _vlan_id <= end);
248 }
249
250 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
251 u16 vid, bool *pop_vlan)
252 {
253 __be16 vlan_id;
254
255 if (pop_vlan)
256 *pop_vlan = false;
257 vlan_id = htons(vid);
258 if (!vlan_id) {
259 vlan_id = ofdpa_port->internal_vlan_id;
260 if (pop_vlan)
261 *pop_vlan = true;
262 }
263
264 return vlan_id;
265 }
266
267 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
268 __be16 vlan_id)
269 {
270 if (ofdpa_vlan_id_is_internal(vlan_id))
271 return 0;
272
273 return ntohs(vlan_id);
274 }
275
276 static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
277 const char *kind)
278 {
279 return ofdpa_port->bridge_dev &&
280 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
281 }
282
/* True if the port is a member of a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
287
/* True if the port is attached to an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
292
/* Flags threaded through the table-manipulation helpers. */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* atomic context; don't sleep */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)	/* FDB entry was learned, not static */
#define OFDPA_OP_FLAG_REFRESH		BIT(3)	/* only refresh FDB entry timestamp */
297
298 static bool ofdpa_flags_nowait(int flags)
299 {
300 return flags & OFDPA_OP_FLAG_NOWAIT;
301 }
302
/* Transaction-aware allocator.  Returns zeroed memory of @size bytes,
 * or NULL.  GFP_ATOMIC is used when OFDPA_OP_FLAG_NOWAIT is set.
 * The switchdev_trans_item header is hidden just before the returned
 * pointer (see ofdpa_kfree()).
 */
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* skip past the hidden trans item header */
	return elem ? elem + 1 : NULL;
}
332
/* kzalloc() equivalent that honors the switchdev transaction phase. */
static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}
338
/* kcalloc() equivalent that honors the switchdev transaction phase.
 * Returns zeroed memory for an array of @n elements of @size bytes,
 * or NULL on allocation failure or arithmetic overflow.
 */
static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
			   size_t n, size_t size)
{
	/* Reject multiplications that would overflow size_t, mirroring
	 * kcalloc() semantics, instead of silently allocating a
	 * too-small buffer.
	 */
	if (size && n > SIZE_MAX / size)
		return NULL;

	return __ofdpa_mem_alloc(trans, flags, n * size);
}
344
/* Free memory obtained from ofdpa_kzalloc()/ofdpa_kcalloc().  @mem
 * points just past the hidden switchdev_trans_item header, so step
 * back one header before kfree().
 */
static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
360
361 /*************************************************************
362 * Flow, group, FDB, internal VLAN and neigh command prepares
363 *************************************************************/
364
/* Serialize the ingress-port table match/action TLVs into @desc_info.
 * Returns 0, or -EMSGSIZE if the descriptor ran out of space.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
381
/* Serialize the VLAN table match/action TLVs into @desc_info.  The
 * NEW_VLAN_ID action is emitted only for untagged-frame entries.
 * Returns 0, or -EMSGSIZE if the descriptor ran out of space.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
405
/* Serialize the termination-MAC table match/action TLVs into
 * @desc_info.  Returns 0, or -EMSGSIZE if the descriptor ran out of
 * space.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* COPY_CPU_ACTION is optional; only emitted when requested */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
441
/* Serialize the unicast-routing table match/action TLVs into
 * @desc_info.  Returns 0, or -EMSGSIZE if the descriptor ran out of
 * space.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
464
/* Serialize the bridging table match/action TLVs into @desc_info.
 * dst MAC, mask, vlan and tunnel id are all optional match fields.
 * Returns 0, or -EMSGSIZE if the descriptor ran out of space.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
498
/* Serialize the ACL policy table match/action TLVs into @desc_info.
 * For IPv4/IPv6 ethertypes the IP proto and TOS fields are also
 * emitted, with TOS split into DSCP (low 6 bits) and ECN (top 2).
 * Returns 0, or -EMSGSIZE if the descriptor ran out of space.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
565
/* rocker_cmd_exec() prepare callback: serialize a flow add/mod command
 * for the entry passed in @priv.  Emits the fields common to all flow
 * tables, then dispatches on tbl_id for the table-specific TLVs.
 * Returns 0, -EMSGSIZE on descriptor overflow, or -ENOTSUPP for an
 * unknown table id.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
622
/* rocker_cmd_exec() prepare callback: serialize a flow delete command.
 * Only the cookie is needed to identify the entry to hardware.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
642
/* Serialize the L2-interface group TLVs (out pport, pop-vlan action).
 * Returns 0, or -EMSGSIZE if the descriptor ran out of space.
 */
static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}
656
/* Serialize the L2-rewrite group TLVs.  src/dst MAC and vlan rewrites
 * are emitted only when non-zero.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
679
/* Serialize the member group-id list for flood/mcast groups as a
 * nested TLV array.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
705
/* Serialize the L3-unicast group TLVs (MAC rewrites, vlan, TTL check,
 * chained lower group).  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
731
/* rocker_cmd_exec() prepare callback: serialize a group add/mod
 * command, dispatching on the group type encoded in group_id.
 * Returns 0, -EMSGSIZE on descriptor overflow, or -ENOTSUPP for an
 * unknown group type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
776
/* rocker_cmd_exec() prepare callback: serialize a group delete
 * command.  Only the group id is needed to identify the entry.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
796
797 /***************************************************
798 * Flow, group, FDB, internal VLAN and neigh tables
799 ***************************************************/
800
/* Look up a flow entry whose key matches @match.  A key_len of 0 in
 * @match means the full key is compared.  Caller must hold
 * ofdpa->flow_tbl_lock and must have computed match->key_crc32.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
816
/* Insert @match into the software flow table and push it to hardware.
 * If an entry with the same key already exists, its cookie is reused
 * and a FLOW_MOD is sent; otherwise a fresh cookie is allocated and a
 * FLOW_ADD is sent.  In the switchdev prepare phase the table is not
 * modified and no hardware command is issued.  Takes ownership of
 * @match.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* replace existing entry in place, keeping its cookie */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}
857
/* Remove the flow entry matching @match from the software table and
 * send a FLOW_DEL to hardware if it was present.  @match is always
 * freed; the found entry is freed after the hardware command.  In the
 * switchdev prepare phase nothing is unhashed or sent.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}
895
896 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
897 struct switchdev_trans *trans, int flags,
898 struct ofdpa_flow_tbl_entry *entry)
899 {
900 if (flags & OFDPA_OP_FLAG_REMOVE)
901 return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
902 else
903 return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
904 }
905
906 static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
907 struct switchdev_trans *trans, int flags,
908 u32 in_pport, u32 in_pport_mask,
909 enum rocker_of_dpa_table_id goto_tbl)
910 {
911 struct ofdpa_flow_tbl_entry *entry;
912
913 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
914 if (!entry)
915 return -ENOMEM;
916
917 entry->key.priority = OFDPA_PRIORITY_IG_PORT;
918 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
919 entry->key.ig_port.in_pport = in_pport;
920 entry->key.ig_port.in_pport_mask = in_pport_mask;
921 entry->key.ig_port.goto_tbl = goto_tbl;
922
923 return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
924 }
925
926 static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
927 struct switchdev_trans *trans, int flags,
928 u32 in_pport, __be16 vlan_id,
929 __be16 vlan_id_mask,
930 enum rocker_of_dpa_table_id goto_tbl,
931 bool untagged, __be16 new_vlan_id)
932 {
933 struct ofdpa_flow_tbl_entry *entry;
934
935 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
936 if (!entry)
937 return -ENOMEM;
938
939 entry->key.priority = OFDPA_PRIORITY_VLAN;
940 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
941 entry->key.vlan.in_pport = in_pport;
942 entry->key.vlan.vlan_id = vlan_id;
943 entry->key.vlan.vlan_id_mask = vlan_id_mask;
944 entry->key.vlan.goto_tbl = goto_tbl;
945
946 entry->key.vlan.untagged = untagged;
947 entry->key.vlan.new_vlan_id = new_vlan_id;
948
949 return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
950 }
951
/* Build and install (or remove, per @flags) a termination-MAC table
 * entry.  Multicast destinations route to the multicast-routing table
 * at mcast priority; unicast destinations to unicast routing.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
988
/* Build and install (or remove, per @flags) a bridging table entry.
 * Priority is derived from three properties: VLAN vs. tenant (tunnel)
 * bridging, default (no exact dst MAC) vs. exact-match, and wildcard
 * (non-ff mask) vs. exact mask.
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *eth_dst, const u8 *eth_dst_mask,
				 __be16 vlan_id, u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* "default" entry: no dst MAC at all, or a masked (flood) match */
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1042
1043 static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
1044 struct switchdev_trans *trans,
1045 __be16 eth_type, __be32 dst,
1046 __be32 dst_mask, u32 priority,
1047 enum rocker_of_dpa_table_id goto_tbl,
1048 u32 group_id, struct fib_info *fi,
1049 int flags)
1050 {
1051 struct ofdpa_flow_tbl_entry *entry;
1052
1053 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1054 if (!entry)
1055 return -ENOMEM;
1056
1057 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
1058 entry->key.priority = priority;
1059 entry->key.ucast_routing.eth_type = eth_type;
1060 entry->key.ucast_routing.dst4 = dst;
1061 entry->key.ucast_routing.dst4_mask = dst_mask;
1062 entry->key.ucast_routing.goto_tbl = goto_tbl;
1063 entry->key.ucast_routing.group_id = group_id;
1064 entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
1065 ucast_routing.group_id);
1066 entry->fi = fi;
1067
1068 return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1069 }
1070
1071 static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
1072 struct switchdev_trans *trans, int flags,
1073 u32 in_pport, u32 in_pport_mask,
1074 const u8 *eth_src, const u8 *eth_src_mask,
1075 const u8 *eth_dst, const u8 *eth_dst_mask,
1076 __be16 eth_type, __be16 vlan_id,
1077 __be16 vlan_id_mask, u8 ip_proto,
1078 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
1079 u32 group_id)
1080 {
1081 u32 priority;
1082 struct ofdpa_flow_tbl_entry *entry;
1083
1084 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1085 if (!entry)
1086 return -ENOMEM;
1087
1088 priority = OFDPA_PRIORITY_ACL_NORMAL;
1089 if (eth_dst && eth_dst_mask) {
1090 if (ether_addr_equal(eth_dst_mask, mcast_mac))
1091 priority = OFDPA_PRIORITY_ACL_DFLT;
1092 else if (is_link_local_ether_addr(eth_dst))
1093 priority = OFDPA_PRIORITY_ACL_CTRL;
1094 }
1095
1096 entry->key.priority = priority;
1097 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1098 entry->key.acl.in_pport = in_pport;
1099 entry->key.acl.in_pport_mask = in_pport_mask;
1100
1101 if (eth_src)
1102 ether_addr_copy(entry->key.acl.eth_src, eth_src);
1103 if (eth_src_mask)
1104 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
1105 if (eth_dst)
1106 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
1107 if (eth_dst_mask)
1108 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
1109
1110 entry->key.acl.eth_type = eth_type;
1111 entry->key.acl.vlan_id = vlan_id;
1112 entry->key.acl.vlan_id_mask = vlan_id_mask;
1113 entry->key.acl.ip_proto = ip_proto;
1114 entry->key.acl.ip_proto_mask = ip_proto_mask;
1115 entry->key.acl.ip_tos = ip_tos;
1116 entry->key.acl.ip_tos_mask = ip_tos_mask;
1117 entry->key.acl.group_id = group_id;
1118
1119 return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1120 }
1121
1122 static struct ofdpa_group_tbl_entry *
1123 ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1124 const struct ofdpa_group_tbl_entry *match)
1125 {
1126 struct ofdpa_group_tbl_entry *found;
1127
1128 hash_for_each_possible(ofdpa->group_tbl, found,
1129 entry, match->group_id) {
1130 if (found->group_id == match->group_id)
1131 return found;
1132 }
1133
1134 return NULL;
1135 }
1136
1137 static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
1138 struct ofdpa_group_tbl_entry *entry)
1139 {
1140 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1141 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1142 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1143 ofdpa_kfree(trans, entry->group_ids);
1144 break;
1145 default:
1146 break;
1147 }
1148 ofdpa_kfree(trans, entry);
1149 }
1150
/* Insert or replace a group table entry and push it to the device.
 * If an entry with the same group_id exists it is replaced and the
 * command becomes GROUP_MOD, otherwise GROUP_ADD.  Takes ownership
 * of @match in both cases.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* replace: unhash and free the old entry; the prepare
		 * phase must not mutate the hash table
		 */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* issue the hardware command only outside the prepare phase */
	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}
1186
/* Remove a group table entry by group_id and issue GROUP_DEL to the
 * device.  @match serves only as a lookup key and is always freed;
 * the found entry is unhashed (outside the prepare phase) and freed
 * after the command is issued.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the match entry was only a search key */
	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}
1221
1222 static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
1223 struct switchdev_trans *trans, int flags,
1224 struct ofdpa_group_tbl_entry *entry)
1225 {
1226 if (flags & OFDPA_OP_FLAG_REMOVE)
1227 return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
1228 else
1229 return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
1230 }
1231
1232 static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1233 struct switchdev_trans *trans, int flags,
1234 __be16 vlan_id, u32 out_pport,
1235 int pop_vlan)
1236 {
1237 struct ofdpa_group_tbl_entry *entry;
1238
1239 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1240 if (!entry)
1241 return -ENOMEM;
1242
1243 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1244 entry->l2_interface.pop_vlan = pop_vlan;
1245
1246 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1247 }
1248
1249 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1250 struct switchdev_trans *trans,
1251 int flags, u8 group_count,
1252 const u32 *group_ids, u32 group_id)
1253 {
1254 struct ofdpa_group_tbl_entry *entry;
1255
1256 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1257 if (!entry)
1258 return -ENOMEM;
1259
1260 entry->group_id = group_id;
1261 entry->group_count = group_count;
1262
1263 entry->group_ids = ofdpa_kcalloc(trans, flags,
1264 group_count, sizeof(u32));
1265 if (!entry->group_ids) {
1266 ofdpa_kfree(trans, entry);
1267 return -ENOMEM;
1268 }
1269 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1270
1271 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1272 }
1273
1274 static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1275 struct switchdev_trans *trans, int flags,
1276 __be16 vlan_id, u8 group_count,
1277 const u32 *group_ids, u32 group_id)
1278 {
1279 return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
1280 group_count, group_ids,
1281 group_id);
1282 }
1283
1284 static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
1285 struct switchdev_trans *trans, int flags,
1286 u32 index, const u8 *src_mac, const u8 *dst_mac,
1287 __be16 vlan_id, bool ttl_check, u32 pport)
1288 {
1289 struct ofdpa_group_tbl_entry *entry;
1290
1291 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1292 if (!entry)
1293 return -ENOMEM;
1294
1295 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1296 if (src_mac)
1297 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1298 if (dst_mac)
1299 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1300 entry->l3_unicast.vlan_id = vlan_id;
1301 entry->l3_unicast.ttl_check = ttl_check;
1302 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1303
1304 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1305 }
1306
1307 static struct ofdpa_neigh_tbl_entry *
1308 ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1309 {
1310 struct ofdpa_neigh_tbl_entry *found;
1311
1312 hash_for_each_possible(ofdpa->neigh_tbl, found,
1313 entry, be32_to_cpu(ip_addr))
1314 if (found->ip_addr == ip_addr)
1315 return found;
1316
1317 return NULL;
1318 }
1319
/* Link @entry into the neigh hash table, keyed by ip_addr.  The table
 * index is consumed outside the commit phase (during prepare, or when
 * there is no transaction) so that commit reuses the index reserved
 * at prepare time; the refcount bump and hash insert happen only once
 * the prepare phase is over.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = ofdpa->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1332
1333 static void ofdpa_neigh_del(struct switchdev_trans *trans,
1334 struct ofdpa_neigh_tbl_entry *entry)
1335 {
1336 if (switchdev_trans_ph_prepare(trans))
1337 return;
1338 if (--entry->ref_count == 0) {
1339 hash_del(&entry->entry);
1340 ofdpa_kfree(trans, entry);
1341 }
1342 }
1343
1344 static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1345 struct switchdev_trans *trans,
1346 const u8 *eth_dst, bool ttl_check)
1347 {
1348 if (eth_dst) {
1349 ether_addr_copy(entry->eth_dst, eth_dst);
1350 entry->ttl_check = ttl_check;
1351 } else if (!switchdev_trans_ph_prepare(trans)) {
1352 entry->ref_count++;
1353 }
1354 }
1355
/* Add, update, or remove (per OFDPA_OP_FLAG_REMOVE) the L3 state for
 * one IPv4 neighbor: an L3 unicast group rewriting to @eth_dst plus a
 * /32 unicast route pointing at that group.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* resolve the requested op against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		/* snapshot before the del may free the found entry */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group. The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* when adding, the entry was handed to the neigh table */
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
1445
/* Install the L3 neighbor state for @ip_addr if its neighbour is
 * already resolved; otherwise kick off ARP resolution.  Creates the
 * neighbour when the lookup misses.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	/* drop the reference taken by lookup/create */
	neigh_release(n);
	return err;
}
1474
/* Resolve an IPv4 nexthop to an L3 unicast group index (returned in
 * *index), adding/removing/ref-counting the neigh table entry as
 * needed.  If the nexthop MAC is not yet known, trigger resolution.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* resolve the requested op against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, trans, entry);
		*index = entry->index;
		/* MAC is unknown for a brand-new entry */
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		/* NULL eth_dst: just bump the entry's refcount */
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* when adding, the entry was handed to the neigh table */
	if (!adding)
		ofdpa_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);

	return err;
}
1533
1534 static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1535 int port_index)
1536 {
1537 struct rocker_port *rocker_port;
1538
1539 rocker_port = ofdpa->rocker->ports[port_index];
1540 return rocker_port ? rocker_port->wpriv : NULL;
1541 }
1542
1543 static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1544 struct switchdev_trans *trans,
1545 int flags, __be16 vlan_id)
1546 {
1547 struct ofdpa_port *p;
1548 const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1549 unsigned int port_count = ofdpa->rocker->port_count;
1550 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1551 u32 *group_ids;
1552 u8 group_count = 0;
1553 int err = 0;
1554 int i;
1555
1556 group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
1557 if (!group_ids)
1558 return -ENOMEM;
1559
1560 /* Adjust the flood group for this VLAN. The flood group
1561 * references an L2 interface group for each port in this
1562 * VLAN.
1563 */
1564
1565 for (i = 0; i < port_count; i++) {
1566 p = ofdpa_port_get(ofdpa, i);
1567 if (!p)
1568 continue;
1569 if (!ofdpa_port_is_bridged(p))
1570 continue;
1571 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1572 group_ids[group_count++] =
1573 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1574 }
1575 }
1576
1577 /* If there are no bridged ports in this VLAN, we're done */
1578 if (group_count == 0)
1579 goto no_ports_in_vlan;
1580
1581 err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
1582 group_count, group_ids, group_id);
1583 if (err)
1584 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1585
1586 no_ports_in_vlan:
1587 ofdpa_kfree(trans, group_ids);
1588 return err;
1589 }
1590
/* Manage the L2 interface groups needed when a port joins/leaves a
 * VLAN: one group for the port itself (only in LEARNING/FORWARDING
 * STP states) and one shared group toward the CPU port, created when
 * the first port joins the VLAN and destroyed when the last leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* count ports currently members of this VLAN */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* nothing to do unless this is the first join (adding, ref==1)
	 * or the last leave (removing, ref==0)
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1644
/* Control-traffic policy table, indexed by OFDPA_CTRL_*.  Each entry
 * names the MAC/ethertype to match and which of the three handlers
 * (ACL, bridging-flood, or termination-MAC) installs it — see
 * ofdpa_port_ctrl_vlan().
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match (NULL = none) */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* ethertype to match (0 = any) */
	bool acl;			/* install via ACL table */
	bool bridge;			/* install via bridging table */
	bool term;			/* install via termination-MAC table */
	bool copy_to_cpu;		/* also deliver matches to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1695
1696 static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
1697 struct switchdev_trans *trans, int flags,
1698 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1699 {
1700 u32 in_pport = ofdpa_port->pport;
1701 u32 in_pport_mask = 0xffffffff;
1702 u32 out_pport = 0;
1703 const u8 *eth_src = NULL;
1704 const u8 *eth_src_mask = NULL;
1705 __be16 vlan_id_mask = htons(0xffff);
1706 u8 ip_proto = 0;
1707 u8 ip_proto_mask = 0;
1708 u8 ip_tos = 0;
1709 u8 ip_tos_mask = 0;
1710 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1711 int err;
1712
1713 err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
1714 in_pport, in_pport_mask,
1715 eth_src, eth_src_mask,
1716 ctrl->eth_dst, ctrl->eth_dst_mask,
1717 ctrl->eth_type,
1718 vlan_id, vlan_id_mask,
1719 ip_proto, ip_proto_mask,
1720 ip_tos, ip_tos_mask,
1721 group_id);
1722
1723 if (err)
1724 netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1725
1726 return err;
1727 }
1728
1729 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1730 struct switchdev_trans *trans,
1731 int flags,
1732 const struct ofdpa_ctrl *ctrl,
1733 __be16 vlan_id)
1734 {
1735 enum rocker_of_dpa_table_id goto_tbl =
1736 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1737 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1738 u32 tunnel_id = 0;
1739 int err;
1740
1741 if (!ofdpa_port_is_bridged(ofdpa_port))
1742 return 0;
1743
1744 err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
1745 ctrl->eth_dst, ctrl->eth_dst_mask,
1746 vlan_id, tunnel_id,
1747 goto_tbl, group_id, ctrl->copy_to_cpu);
1748
1749 if (err)
1750 netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1751
1752 return err;
1753 }
1754
1755 static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
1756 struct switchdev_trans *trans, int flags,
1757 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1758 {
1759 u32 in_pport_mask = 0xffffffff;
1760 __be16 vlan_id_mask = htons(0xffff);
1761 int err;
1762
1763 if (ntohs(vlan_id) == 0)
1764 vlan_id = ofdpa_port->internal_vlan_id;
1765
1766 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
1767 ofdpa_port->pport, in_pport_mask,
1768 ctrl->eth_type, ctrl->eth_dst,
1769 ctrl->eth_dst_mask, vlan_id,
1770 vlan_id_mask, ctrl->copy_to_cpu,
1771 flags);
1772
1773 if (err)
1774 netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1775
1776 return err;
1777 }
1778
1779 static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
1780 struct switchdev_trans *trans, int flags,
1781 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1782 {
1783 if (ctrl->acl)
1784 return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
1785 ctrl, vlan_id);
1786 if (ctrl->bridge)
1787 return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
1788 ctrl, vlan_id);
1789
1790 if (ctrl->term)
1791 return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
1792 ctrl, vlan_id);
1793
1794 return -EOPNOTSUPP;
1795 }
1796
1797 static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
1798 struct switchdev_trans *trans, int flags,
1799 __be16 vlan_id)
1800 {
1801 int err = 0;
1802 int i;
1803
1804 for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1805 if (ofdpa_port->ctrls[i]) {
1806 err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1807 &ofdpa_ctrls[i], vlan_id);
1808 if (err)
1809 return err;
1810 }
1811 }
1812
1813 return err;
1814 }
1815
1816 static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
1817 struct switchdev_trans *trans, int flags,
1818 const struct ofdpa_ctrl *ctrl)
1819 {
1820 u16 vid;
1821 int err = 0;
1822
1823 for (vid = 1; vid < VLAN_N_VID; vid++) {
1824 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1825 continue;
1826 err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1827 ctrl, htons(vid));
1828 if (err)
1829 break;
1830 }
1831
1832 return err;
1833 }
1834
/* Add (or remove, per OFDPA_OP_FLAG_REMOVE) port membership in VLAN
 * @vid: the VLAN-table entry, the per-VLAN L2 interface/flood groups,
 * and any enabled ctrl policies for that VLAN.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	/* map vid to the port's internal VLAN (handles untagged case) */
	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

err_out:
	/* during the prepare phase, flip the bit back so the commit
	 * phase sees the original bitmap state and re-applies the op
	 */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	return err;
}
1894
1895 static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
1896 struct switchdev_trans *trans, int flags)
1897 {
1898 enum rocker_of_dpa_table_id goto_tbl;
1899 u32 in_pport;
1900 u32 in_pport_mask;
1901 int err;
1902
1903 /* Normal Ethernet Frames. Matches pkts from any local physical
1904 * ports. Goto VLAN tbl.
1905 */
1906
1907 in_pport = 0;
1908 in_pport_mask = 0xffff0000;
1909 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1910
1911 err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
1912 in_pport, in_pport_mask,
1913 goto_tbl);
1914 if (err)
1915 netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1916
1917 return err;
1918 }
1919
/* Deferred-work context used to notify the bridge layer about a
 * learned (or removed) FDB entry outside of atomic context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the address was seen on */
	struct switchdev_trans *trans;	/* transaction owning this work item */
	int flags;			/* OFDPA_OP_FLAG_* of the FDB op */
	u8 addr[ETH_ALEN];		/* MAC address learned/removed */
	u16 vid;			/* VLAN id for the notification */
};
1928
/* Work handler: report a learned/removed FDB entry to the bridge via
 * switchdev notifiers (SWITCHDEV_FDB_ADD/DEL) under rtnl, then free
 * the work item.  Entries without OFDPA_OP_FLAG_LEARNED are skipped.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	ofdpa_kfree(lw->trans, work);
}
1951
/* Program the bridging flow entry for (addr, vlan_id) and, when
 * learning sync (BR_LEARNING_SYNC) is enabled on a bridged port,
 * schedule deferred work to notify the bridge layer.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	/* a REFRESH only touches aging state; skip the flow table */
	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* nothing is notified during the prepare phase */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
2002
2003 static struct ofdpa_fdb_tbl_entry *
2004 ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
2005 const struct ofdpa_fdb_tbl_entry *match)
2006 {
2007 struct ofdpa_fdb_tbl_entry *found;
2008
2009 hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
2010 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2011 return found;
2012
2013 return NULL;
2014 }
2015
/* Add/remove/refresh an FDB table entry for (port, addr, vlan_id),
 * then propagate the change via ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  struct switchdev_trans *trans,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* hash bucket is selected by a CRC over the whole key */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			ofdpa_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(ofdpa->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		ofdpa_kfree(trans, fdb);
		/* removing an entry that was never there: nothing to do */
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
}
2068
/* Remove all learned FDB entries belonging to @ofdpa_port from
 * hardware and from the FDB table.  Skipped while the port is in
 * LEARNING/FORWARDING STP state.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		/* REMOVE flag set above: this tears the entry down */
		err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2106
/* FDB aging timer callback: expire learned entries whose last-touched
 * time is older than the owning port's ageing_time, then re-arm the
 * timer for the soonest remaining expiry.
 */
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove from hardware and the table */
			ofdpa_port_fdb_learn(ofdpa_port, NULL,
					     flags, entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2141
2142 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2143 struct switchdev_trans *trans, int flags,
2144 __be16 vlan_id)
2145 {
2146 u32 in_pport_mask = 0xffffffff;
2147 __be16 eth_type;
2148 const u8 *dst_mac_mask = ff_mac;
2149 __be16 vlan_id_mask = htons(0xffff);
2150 bool copy_to_cpu = false;
2151 int err;
2152
2153 if (ntohs(vlan_id) == 0)
2154 vlan_id = ofdpa_port->internal_vlan_id;
2155
2156 eth_type = htons(ETH_P_IP);
2157 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2158 ofdpa_port->pport, in_pport_mask,
2159 eth_type, ofdpa_port->dev->dev_addr,
2160 dst_mac_mask, vlan_id, vlan_id_mask,
2161 copy_to_cpu, flags);
2162 if (err)
2163 return err;
2164
2165 eth_type = htons(ETH_P_IPV6);
2166 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2167 ofdpa_port->pport, in_pport_mask,
2168 eth_type, ofdpa_port->dev->dev_addr,
2169 dst_mac_mask, vlan_id, vlan_id_mask,
2170 copy_to_cpu, flags);
2171
2172 return err;
2173 }
2174
/* Toggle forwarding for the port by adding/removing the L2 interface
 * group entry on every VLAN in the port's vlan_bitmap.
 */
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING. Traffic from CPU can still egress, regardless of
	 * port STP state. Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* internal VLANs are stripped on egress */
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
2212
/* Move the port to STP state @state: recompute which control-frame
 * (ctrl) flow entries the new state wants, add/remove the deltas, flush
 * the FDB and update forwarding.  During the switchdev prepare phase
 * the ctrl bookkeeping and stp_state are restored at the end so that
 * only the commit phase leaves a lasting change.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	prev_state = ofdpa_port->stp_state;
	if (prev_state == state)
		return 0;

	/* snapshot for the prepare-phase rollback below */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = state;

	/* decide which ctrl traps each STP state needs */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* apply only the ctrl entries that changed */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_out;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
	if (err)
		goto err_out;

	err = ofdpa_port_fwding(ofdpa_port, trans, flags);

err_out:
	/* prepare phase must not leave state behind */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		ofdpa_port->stp_state = prev_state;
	}

	return err;
}
2279
2280 static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2281 {
2282 if (ofdpa_port_is_bridged(ofdpa_port))
2283 /* bridge STP will enable port */
2284 return 0;
2285
2286 /* port is not bridged, so simulate going to FORWARDING state */
2287 return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2288 BR_STATE_FORWARDING);
2289 }
2290
2291 static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2292 {
2293 if (ofdpa_port_is_bridged(ofdpa_port))
2294 /* bridge STP will disable port */
2295 return 0;
2296
2297 /* port is not bridged, so simulate going to DISABLED state */
2298 return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2299 BR_STATE_DISABLED);
2300 }
2301
/* Add VLAN @vid to the port: install the VLAN flow entry, then the
 * router termination-MAC entries for it.  If the router-MAC step fails,
 * the just-added VLAN entry is rolled back.
 */
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
	if (err)
		/* undo the VLAN entry added above */
		ofdpa_port_vlan(ofdpa_port, trans,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}
2321
2322 static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2323 u16 vid, u16 flags)
2324 {
2325 int err;
2326
2327 err = ofdpa_port_router_mac(ofdpa_port, NULL,
2328 OFDPA_OP_FLAG_REMOVE, htons(vid));
2329 if (err)
2330 return err;
2331
2332 return ofdpa_port_vlan(ofdpa_port, NULL,
2333 OFDPA_OP_FLAG_REMOVE, vid);
2334 }
2335
2336 static struct ofdpa_internal_vlan_tbl_entry *
2337 ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2338 {
2339 struct ofdpa_internal_vlan_tbl_entry *found;
2340
2341 hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2342 entry, ifindex) {
2343 if (found->ifindex == ifindex)
2344 return found;
2345 }
2346
2347 return NULL;
2348 }
2349
/* Get (or take another reference on) the internal VLAN id associated
 * with @ifindex.  A new entry is speculatively allocated outside the
 * lock and discarded if the ifindex is already present.  Returns the
 * internal VLAN id, or 0 on allocation failure.
 * NOTE(review): when the internal-VLAN bitmap is exhausted, the new
 * entry keeps the kzalloc'd vlan_id of 0 and is still ref-counted and
 * returned — callers presumably treat 0 as "no VLAN"; confirm.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* allocate before taking the spinlock; may go unused */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free internal VLAN id */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2391
/* Program (or remove, per @flags) an IPv4 unicast route.  Routes with
 * a gateway reachable through this port resolve to an L3 unicast
 * group; everything else is pointed at the CPU via the port's internal
 * VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, __be32 dst,
			       int dst_len, struct fib_info *fi,
			       u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* only the first nexthop is considered */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2437
/* Drop a reference on the internal VLAN id for @ifindex; when the
 * refcount hits zero the VLAN id bit is released back to the bitmap
 * and the entry is freed.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* recover the bitmap index from the VLAN id */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2467
2468 /**********************************
2469 * Rocker world ops implementation
2470 **********************************/
2471
/* World init: set up all software tables and their locks, arm the FDB
 * ageing timer (fires immediately to establish its cadence) and seed
 * the global ageing time with the bridge default.
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2501
/* World teardown: stop the ageing timer, drain pending ordered work,
 * then unlink every entry from each software table under its lock.
 * NOTE(review): entries are only hash_del()'d here, not kfree()'d —
 * confirm their memory is owned/released elsewhere (or intentionally
 * left to die with the world allocation).
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2544
2545 static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2546 {
2547 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2548
2549 ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2550 ofdpa_port->rocker_port = rocker_port;
2551 ofdpa_port->dev = rocker_port->dev;
2552 ofdpa_port->pport = rocker_port->pport;
2553 ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
2554 ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2555 return 0;
2556 }
2557
/* Per-port init: program hardware learning, install the ingress port
 * table entry, allocate the port's internal VLAN (keyed by its own
 * ifindex) and add the untagged (vid 0) VLAN.  On untagged-VLAN
 * failure the ingress table entry is rolled back.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2587
/* Per-port teardown: remove the ingress port table entry. */
static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
}
2594
/* ndo_open hook: enable forwarding on the port. */
static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}
2601
/* ndo_stop hook: disable forwarding without waiting for completion. */
static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}
2608
/* switchdev attr hook: apply a bridge STP state change to the port. */
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state,
					 struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
}
2617
/* switchdev attr hook: update bridge port flags.  Hardware learning is
 * reprogrammed only when BR_LEARNING actually changed and only in the
 * commit phase; in the prepare phase the flags are restored at the end
 * so nothing sticks.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* prepare phase: roll the flags back */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2638
2639 static int
2640 ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2641 unsigned long *p_brport_flags)
2642 {
2643 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2644
2645 *p_brport_flags = ofdpa_port->brport_flags;
2646 return 0;
2647 }
2648
/* switchdev attr hook: set the port's FDB ageing time (commit phase
 * only).  The world-wide ageing_time tracks the minimum across ports,
 * and the cleanup timer is kicked so the new value takes effect
 * immediately.
 */
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		/* bridge passes clock_t; timer math wants jiffies */
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
2666
2667 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2668 const struct switchdev_obj_port_vlan *vlan,
2669 struct switchdev_trans *trans)
2670 {
2671 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2672 u16 vid;
2673 int err;
2674
2675 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2676 err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
2677 if (err)
2678 return err;
2679 }
2680
2681 return 0;
2682 }
2683
2684 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2685 const struct switchdev_obj_port_vlan *vlan)
2686 {
2687 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2688 u16 vid;
2689 int err;
2690
2691 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2692 err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2693 if (err)
2694 return err;
2695 }
2696
2697 return 0;
2698 }
2699
/* switchdev dump hook: report every VID set in the port's vlan_bitmap
 * through @cb, one object per VID; internal VLANs are flagged PVID.
 */
static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
				    struct switchdev_obj_port_vlan *vlan,
				    switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (ofdpa_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		/* single-VID range per callback invocation */
		vlan->vid_begin = vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
2722
2723 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2724 const struct switchdev_obj_port_fdb *fdb,
2725 struct switchdev_trans *trans)
2726 {
2727 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2728 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2729
2730 if (!ofdpa_port_is_bridged(ofdpa_port))
2731 return -EINVAL;
2732
2733 return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
2734 }
2735
2736 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2737 const struct switchdev_obj_port_fdb *fdb)
2738 {
2739 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2740 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2741 int flags = OFDPA_OP_FLAG_REMOVE;
2742
2743 if (!ofdpa_port_is_bridged(ofdpa_port))
2744 return -EINVAL;
2745
2746 return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
2747 }
2748
/* switchdev dump hook: report every FDB entry belonging to this port
 * through @cb, holding fdb_tbl_lock for the walk.  All entries are
 * reported as NUD_REACHABLE.
 */
static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
				   struct switchdev_obj_port_fdb *fdb,
				   switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
						  found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2777
/* Enslave the port to @bridge: swap the port's internal VLAN (keyed by
 * its own ifindex) for the bridge's (keyed by the bridge ifindex),
 * dropping and re-adding the untagged VLAN around the swap.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
2802
/* Release the port from its bridge: give back the bridge's internal
 * VLAN, re-acquire one keyed by the port's own ifindex, restore the
 * untagged VLAN, and — for a port that is administratively up —
 * re-enable forwarding (it is no longer driven by bridge STP).
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2828
2829 static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2830 struct net_device *master)
2831 {
2832 int err;
2833
2834 ofdpa_port->bridge_dev = master;
2835
2836 err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2837 if (err)
2838 return err;
2839 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2840
2841 return err;
2842 }
2843
2844 static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2845 struct net_device *master)
2846 {
2847 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2848 int err = 0;
2849
2850 if (netif_is_bridge_master(master))
2851 err = ofdpa_port_bridge_join(ofdpa_port, master);
2852 else if (netif_is_ovs_master(master))
2853 err = ofdpa_port_ovs_changed(ofdpa_port, master);
2854 return err;
2855 }
2856
2857 static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2858 struct net_device *master)
2859 {
2860 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2861 int err = 0;
2862
2863 if (ofdpa_port_is_bridged(ofdpa_port))
2864 err = ofdpa_port_bridge_leave(ofdpa_port);
2865 else if (ofdpa_port_is_ovsed(ofdpa_port))
2866 err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2867 return err;
2868 }
2869
2870 static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2871 struct neighbour *n)
2872 {
2873 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2874 int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2875 OFDPA_OP_FLAG_NOWAIT;
2876 __be32 ip_addr = *(__be32 *) n->primary_key;
2877
2878 return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2879 }
2880
2881 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2882 struct neighbour *n)
2883 {
2884 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2885 int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2886 __be32 ip_addr = *(__be32 *) n->primary_key;
2887
2888 return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2889 }
2890
2891 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2892 const unsigned char *addr,
2893 __be16 vlan_id)
2894 {
2895 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2896 int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2897
2898 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2899 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2900 return 0;
2901
2902 return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
2903 }
2904
2905 static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2906 struct rocker *rocker)
2907 {
2908 struct rocker_port *rocker_port;
2909
2910 rocker_port = rocker_port_dev_lower_find(dev, rocker);
2911 return rocker_port ? rocker_port->wpriv : NULL;
2912 }
2913
/* FIB notifier: offload an added IPv4 route.  Routes whose device is
 * not one of our (lower) ports are ignored, as is everything once
 * offload has been aborted.  On success the fib_info offload count is
 * bumped so the kernel marks the route as offloaded.
 */
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	fib_info_offload_inc(fen_info->fi);
	return 0;
}
2934
/* FIB notifier: un-offload a deleted IPv4 route.  Mirror of
 * ofdpa_fib4_add(): drops the offload count, then removes the flow
 * entry with OFDPA_OP_FLAG_REMOVE.
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	fib_info_offload_dec(fen_info->fi);
	return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
2951
/* FIB notifier abort: give up on route offload entirely.  Walks the
 * flow table under flow_tbl_lock, un-offloads and deletes every
 * unicast-routing entry, and sets fib_aborted so later add/del
 * notifications become no-ops.  Idempotent.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		fib_info_offload_dec(flow_entry->fi);
		ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2980
/* Rocker "world" glue: binds all of the OF-DPA handlers above into the
 * ops table the core rocker driver dispatches through for ports in
 * ROCKER_PORT_MODE_OF_DPA.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};