team: push array_index and port into separate structure
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * The ability to change the mac address of an open port device is tested
 * in team_port_add, so this function can be called without checking the
 * return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);

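/*
 * Resolve the effective link state of a port: a user-supplied override
 * (user.linkup, honored when user.linkup_enabled is set) takes precedence
 * over the link state reported by the port device itself.
 */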
static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}

/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
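
/*
 * Instance bookkeeping example: a per-port option with array_size == 3 on
 * a team with two ports ends up with six option instances -- one per
 * (port, array_index) pair -- since __team_option_inst_add() runs once per
 * port and expands every array element. A plain option (array_size == 0)
 * still gets exactly one instance, with array_index 0.
 */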

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	struct team_port *port;
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}

	list_for_each_entry(port, &team->port_list, list) {
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	/* all option_count entries were duplicated above; free them all */
	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_port_add(struct team *team, struct team_port *port)
{
	int err;

	err = __team_option_inst_add_port(team, port);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}

static void team_option_port_del(struct team *team, struct team_port *port)
{
	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
}

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	int err;

	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	err = opt_inst->option->setter(team, ctx);
	if (err)
		return err;

	opt_inst->changed = true;
	__team_options_change_check(team);
	return err;
}

/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);
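
/*
 * Illustrative sketch (not part of this file): a minimal mode module
 * registers a struct team_mode roughly like this, mirroring what the
 * in-tree team_mode_roundrobin module does. The "mymode" identifiers are
 * placeholders.
 *
 *	static const struct team_mode_ops mymode_ops = {
 *		.transmit	= mymode_transmit,
 *		.receive	= mymode_receive,
 *	};
 *
 *	static const struct team_mode mymode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.ops		= &mymode_ops,
 *	};
 *
 *	static int __init mymode_init(void)
 *	{
 *		return team_mode_register(&mymode);
 *	}
 *
 *	static void __exit mymode_exit(void)
 *	{
 *		team_mode_unregister(&mymode);
 *	}
 */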

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}
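
/*
 * Note on autoloading: the request_module("team-mode-%s", kind) call above
 * only finds a mode module on demand if that module advertises a matching
 * alias, e.g. MODULE_ALIAS("team-mode-roundrobin") in the roundrobin
 * module.
 */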

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so
 * there's no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/************************
 * Rx path frame handler
 ************************/

static bool team_port_enabled(struct team_port *port);

/* note: already called with rcu_read_lock */
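/*
 * A mode's receive op returns one of the rx_handler results:
 * RX_HANDLER_ANOTHER makes the core re-run reception with skb->dev
 * switched to the team device (the statistics update below relies on
 * that), while RX_HANDLER_EXACT restricts delivery to exact-match
 * protocol handlers on the port itself.
 */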
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

static bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (might be racy, so a reader could see an incorrect port index
 * while processing an in-flight packet, but that is not a problem). Writes
 * are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	int rm_index = port->index;

	if (!team_port_enabled(port))
		return;
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, rm_index);
	team->en_port_count--;
	port->index = -1;
}
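
/*
 * Worked example of the compaction above: with enabled ports at indices
 * {0, 1, 2}, disabling the port at index 1 rehashes the port at index 2
 * down to index 1, leaving a dense {0, 1} range, so modes can safely pick
 * a port by an index computed modulo en_port_count.
 */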

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_option_port_add(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	port->index = -1;
	team_port_enable(team, port);
	list_add_tail_rcu(&port->list, &team->port_list);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	port->removed = true;
	__team_port_change_check(port, false);
	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_adjust_ops(team);
	team_option_port_del(team, port);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
};
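
/*
 * Usage sketch (assuming the teamnl utility shipped with libteam; exact
 * syntax may vary between versions): the options above are driven over
 * the generic netlink interface defined later in this file, e.g.
 *
 *	teamnl team0 setoption mode roundrobin
 *	teamnl team0 getoption mode
 */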

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);
	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
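/*
 * The mode's transmit op consumes the skb whether or not transmission
 * succeeds (the dummy op frees it outright), which is why this handler
 * can return NETDEV_TX_OK unconditionally.
 */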
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}

static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
};


/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor = team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct team),
	.setup		= team_setup,
	.newlink	= team_newlink,
	.validate	= team_validate,
};
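
/*
 * Usage sketch (iproute2 commands; "eth0" is a placeholder): the
 * rtnl_link_ops above make the device creatable and enslaveable with
 * plain ip(8) commands. Ports must be down before enslaving, as enforced
 * in team_port_add():
 *
 *	ip link add name team0 type team
 *	ip link set dev eth0 down
 *	ip link set dev eth0 master team0
 *	ip link set dev team0 up
 */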


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked using the following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_fill_options_get(struct sk_buff *skb,
				    u32 pid, u32 seq, int flags,
				    struct team *team, bool fillall)
{
	struct nlattr *option_list;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr)
		/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		struct nlattr *option_item;
		struct team_option *option = opt_inst->option;
		struct team_option_inst_info *opt_inst_info;
		struct team_gsetter_ctx ctx;

		/* Include only changed options if fill all mode is not on */
		if (!fillall && !opt_inst->changed)
			continue;
		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
		if (!option_item)
			goto nla_put_failure;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
			goto nla_put_failure;
		if (opt_inst->changed) {
			if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
				goto nla_put_failure;
			opt_inst->changed = false;
		}
		if (opt_inst->removed &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
			goto nla_put_failure;

		opt_inst_info = &opt_inst->info;
		if (opt_inst_info->port &&
		    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
				opt_inst_info->port->dev->ifindex))
			goto nla_put_failure;
		if (opt_inst->option->array_size &&
		    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
				opt_inst_info->array_index))
			goto nla_put_failure;
		ctx.info = opt_inst_info;

		switch (option->type) {
		case TEAM_OPTION_TYPE_U32:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
					ctx.data.u32_val))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_STRING:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
					   ctx.data.str_val))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_BINARY:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
				    ctx.data.bin_val.len, ctx.data.bin_val.ptr))
				goto nla_put_failure;
			break;
		case TEAM_OPTION_TYPE_BOOL:
			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
				goto nla_put_failure;
			err = team_option_get(team, opt_inst, &ctx);
			if (err)
				goto errout;
			if (ctx.data.bool_val &&
			    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
				goto nla_put_failure;
			break;
		default:
			BUG();
		}
		nla_nest_end(skb, option_item);
	}

	nla_nest_end(skb, option_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	return err;
}

static int team_nl_fill_options_get_all(struct sk_buff *skb,
					struct genl_info *info, int flags,
					struct team *team)
{
	return team_nl_fill_options_get(skb, info->snd_pid,
					info->snd_seq, NLM_F_ACK,
					team, true);
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);

	team_nl_team_put(team);

	return err;
}

static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		default:
			/* report unknown option types as an error as well */
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

team_put:
	team_nl_team_put(team);

	return err;
}

static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      u32 pid, u32 seq, int flags,
				      struct team *team,
				      bool fillall)
{
	struct nlattr *port_list;
	void *hdr;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr)
		/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		/* Include only changed ports if fill all mode is not on */
		if (!fillall && !port->changed)
			continue;
		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
			goto nla_put_failure;
		if (port->changed) {
			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
				goto nla_put_failure;
			port->changed = false;
		}
		if ((port->removed &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
		    (port->state.linkup &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
			goto nla_put_failure;
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
					  struct genl_info *info, int flags,
					  struct team *team)
{
	return team_nl_fill_port_list_get(skb, info->snd_pid,
					  info->snd_seq, NLM_F_ACK,
					  team, true);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);

	team_nl_team_put(team);

	return err;
}

static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
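
/*
 * Protocol flow, as implemented above and below: userspace issues
 * TEAM_CMD_OPTIONS_GET / TEAM_CMD_PORT_LIST_GET requests addressed by
 * TEAM_ATTR_TEAM_IFINDEX, while the kernel pushes asynchronous change
 * notifications carrying the same payloads to the multicast group named
 * by TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME.
 */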

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

static int team_nl_send_event_options_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_send_event_port_list_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;

	err = team_nl_send_event_options_get(team);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}

/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (!port->removed && port->state.linkup == linkup)
		return;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port->team);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid changing the mtu of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);