}
+/*********************
+ * Peers notification
+ *********************/
+
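+/*
+ * Work handler that emits NETDEV_NOTIFY_PEERS for the team device.  The
+ * notifier is turned into gratuitous ARPs / unsolicited NAs by the IPv4
+ * and IPv6 stacks, so peers refresh their neighbour caches.
+ */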
+static void team_notify_peers_work(struct work_struct *work)
+{
+	struct team *team;
+
+	team = container_of(work, struct team, notify_peers.dw.work);
+
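+	/* RTNL may be held by someone else; requeue immediately and retry
+	 * instead of blocking the workqueue.
+	 */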
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->notify_peers.dw, 0);
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+	rtnl_unlock();
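+	/* Reschedule until count_pending notifications have been sent,
+	 * spaced notify_peers.interval milliseconds apart.
+	 */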
+	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+		schedule_delayed_work(&team->notify_peers.dw,
+				      msecs_to_jiffies(team->notify_peers.interval));
+}
+
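+/* Arm a burst of notify_peers.count notifications.  Called when a port is
+ * enabled or disabled; does nothing if the option is unset (count == 0) or
+ * the team device is down.
+ */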
+static void team_notify_peers(struct team *team)
+{
+	if (!team->notify_peers.count || !netif_running(team->dev))
+		return;
+	atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+	schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
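+/* Init/fini pair for the notification machinery; _fini cancels any pending
+ * work synchronously before the team device goes away.
+ */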
+static void team_notify_peers_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
/************************
* Rx path frame handler
************************/
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
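+	/* Tell peers about the change in enabled ports (gratuitous ARP /
+	 * unsolicited NA) if peer notification has been configured.
+	 */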
+	team_notify_peers(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
+	team_notify_peers(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
return team_change_mode(team, ctx->data.str_val);
}
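+/* Getters/setters backing the "notify_peers_count" and
+ * "notify_peers_interval" (milliseconds) team options.
+ */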
+static int team_notify_peers_count_get(struct team *team,
+					struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.count;
+	return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+					struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+					   struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.interval;
+	return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+					   struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.interval = ctx->data.u32_val;
+	return 0;
+}
+
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
.getter = team_mode_option_get,
.setter = team_mode_option_set,
},
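+	/* Both options start at 0 (peer notification disabled), as the team
+	 * private data is zeroed on device creation; set a non-zero count to
+	 * enable the feature.
+	 */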
+	{
+		.name = "notify_peers_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_count_get,
+		.setter = team_notify_peers_count_set,
+	},
+	{
+		.name = "notify_peers_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_interval_get,
+		.setter = team_notify_peers_interval_set,
+	},
{
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
+
+	team_notify_peers_init(team);
+
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
return 0;
err_options_register:
+	team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}