static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
tipc_disc_reset(net, b_ptr);
return 0;
}
pr_info("Disabling bearer <%s>\n", b_ptr->name);
b_ptr->media->disable_media(b_ptr);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
if (b_ptr->link_req)
tipc_disc_delete(b_ptr->link_req);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static int tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
/*
* Simple link routines
return l_ptr;
}
-/**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
- */
-void tipc_link_delete(struct tipc_link *l)
-{
- tipc_link_reset(l);
- tipc_link_reset_fragments(l);
- tipc_node_detach_link(l->owner, l);
-}
-
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *link;
- struct tipc_node *node;
-
- rcu_read_lock();
- list_for_each_entry_rcu(node, &tn->node_list, list) {
- tipc_node_lock(node);
- link = node->links[bearer_id].link;
- if (link)
- tipc_link_delete(link);
- tipc_node_unlock(node);
- }
- rcu_read_unlock();
-}
-
/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
*
* Give a newly added peer node the sequence number where it should
l->snd_nxt = seqno;
}
-void tipc_link_reset_all(struct tipc_node *node)
-{
- char addr_string[16];
- u32 i;
-
- tipc_node_lock(node);
-
- pr_warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, node->addr));
-
- for (i = 0; i < MAX_BEARERS; i++) {
- if (node->links[i].link) {
- link_print(node->links[i].link, "Resetting link\n");
- tipc_link_reset(node->links[i].link);
- }
- }
-
- tipc_node_unlock(node);
-}
-
static void link_retransmit_failure(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
msg_errcode(msg));
pr_info("sqno %u, prev: %x, src: %x\n",
msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
- tipc_link_reset(l_ptr);
} else {
/* Handle failure on broadcast link */
struct tipc_node *n_ptr;
l->stale_count = 1;
} else if (++l->stale_count > 100) {
link_retransmit_failure(l, skb);
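+ /* Block the link and let the caller take it down via the event code */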
+ l->exec_mode = TIPC_LINK_BLOCKED;
return TIPC_LINK_DOWN_EVT;
}
skb_queue_walk(&l->transmq, skb) {
* Consumes buffer
* Node lock must be held
*/
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
struct tipc_node *node = link->owner;
struct tipc_msg *msg = buf_msg(skb);
struct sk_buff *iskb;
int pos = 0;
+ int rc = 0;
switch (msg_user(msg)) {
case TUNNEL_PROTOCOL:
kfree_skb(skb);
break;
}
- if (!tipc_link_failover_rcv(link, &skb))
+ rc |= tipc_link_failover_rcv(link, &skb);
+ if (!skb)
break;
if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
tipc_data_input(link, skb);
link->stats.recv_fragmented++;
tipc_data_input(link, skb);
} else if (!link->reasm_buf) {
- tipc_link_reset(link);
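+ /* Fragment reassembly failure: block link and report it down */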
+ link->exec_mode = TIPC_LINK_BLOCKED;
+ rc |= TIPC_LINK_DOWN_EVT;
}
break;
case BCAST_PROTOCOL:
default:
break;
};
+ return rc;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
l->rcv_nxt++;
l->stats.recv_info++;
if (unlikely(!tipc_data_input(l, skb)))
- tipc_link_input(l, skb);
+ rc |= tipc_link_input(l, skb);
/* Ack at regular intervals */
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
* Owner node is locked.
*/
-static bool tipc_link_failover_rcv(struct tipc_link *link,
- struct sk_buff **skb)
+static int tipc_link_failover_rcv(struct tipc_link *link,
+ struct sk_buff **skb)
{
struct tipc_msg *msg = buf_msg(*skb);
struct sk_buff *iskb = NULL;
struct tipc_link *pl = NULL;
int bearer_id = msg_bearer_id(msg);
int pos = 0;
+ int rc = 0;
if (msg_type(msg) != FAILOVER_MSG) {
pr_warn("%sunknown tunnel pkt received\n", link_co_err);
goto exit;
pl = link->owner->links[bearer_id].link;
- if (pl && tipc_link_is_up(pl))
- tipc_link_reset(pl);
if (link->failover_pkts == FIRST_FAILOVER)
link->failover_pkts = msg_msgcnt(msg);
}
if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
link->stats.recv_fragments++;
- tipc_buf_append(&link->failover_skb, &iskb);
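+ /* Reassembly failure of tunneled fragment: block link, report it down */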
+ if (!tipc_buf_append(&link->failover_skb, &iskb) &&
+ !link->failover_skb) {
+ link->exec_mode = TIPC_LINK_BLOCKED;
+ rc |= TIPC_LINK_DOWN_EVT;
+ }
}
exit:
if (!link->failover_pkts && pl)
pl->exec_mode = TIPC_LINK_OPEN;
kfree_skb(*skb);
*skb = iskb;
- return *skb;
+ return rc;
}
/* tipc_link_proto_rcv(): receive link level protocol message :
const struct tipc_media_addr *maddr,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
int tipc_link_is_active(struct tipc_link *l_ptr);
void tipc_link_purge_queues(struct tipc_link *l_ptr);
void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
struct sk_buff_head *list);
return true;
}
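+/* tipc_node_delete_links - reset, detach and free all links on a given bearer */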
+void tipc_node_delete_links(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *l;
+ struct tipc_node *n;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_lock(n);
+ l = n->links[bearer_id].link;
+ if (l) {
+ tipc_link_reset(l);
+ n->links[bearer_id].link = NULL;
+ n->link_cnt--;
+ }
+ tipc_node_unlock(n);
+ kfree(l);
+ }
+ rcu_read_unlock();
+}
+
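+/* tipc_node_reset_links - reset all link endpoints towards a peer node */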
+static void tipc_node_reset_links(struct tipc_node *n)
+{
+ char addr_string[16];
+ u32 i;
+
+ tipc_node_lock(n);
+
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, n->addr));
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (n->links[i].link)
+ tipc_link_reset(n->links[i].link);
+ }
+ tipc_node_unlock(n);
+}
+
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
n_ptr->links[l_ptr->bearer_id].link = l_ptr;
tipc_bclink_input(net);
if (flags & TIPC_BCAST_RESET)
- tipc_link_reset_all(node);
+ tipc_node_reset_links(node);
}
/* Caller should hold node lock for the passed node */
return 0;
}
+/* tipc_node_tnl_init(): handle a received TUNNEL_PROTOCOL packet
+ * in order to control parallel link failover or synchronization
+ */
+static void tipc_node_tnl_init(struct tipc_node *n, int bearer_id,
+ struct sk_buff *skb)
+{
+ struct tipc_link *tnl, *pl;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 oseqno = msg_seqno(hdr);
+ int pb_id = msg_bearer_id(hdr);
+
+ if (pb_id >= MAX_BEARERS)
+ return;
+
+ tnl = n->links[bearer_id].link;
+ if (!tnl)
+ return;
+
+ /* Ignore if duplicate */
+ if (less(oseqno, tnl->rcv_nxt))
+ return;
+
+ pl = n->links[pb_id].link;
+ if (!pl)
+ return;
+
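+ /* On failover, the parallel link must be reset and blocked */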
+ if (msg_type(hdr) == FAILOVER_MSG) {
+ if (tipc_link_is_up(pl)) {
+ tipc_link_reset(pl);
+ pl->exec_mode = TIPC_LINK_BLOCKED;
+ }
+ }
+}
+
/**
* tipc_rcv - process TIPC packets/messages arriving from off-node
* @net: the applicable net namespace
struct tipc_media_addr *maddr;
int bearer_id = b->identity;
int rc = 0;
+ int usr;
__skb_queue_head_init(&xmitq);
/* Handle arrival of a non-unicast link packet */
hdr = buf_msg(skb);
+ usr = msg_user(hdr);
if (unlikely(msg_non_seq(hdr))) {
- if (msg_user(hdr) == LINK_CONFIG)
+ if (usr == LINK_CONFIG)
tipc_disc_rcv(net, skb, b);
else
tipc_bclink_rcv(net, skb);
goto discard;
tipc_node_lock(n);
+ /* Prepare links for tunneled reception if applicable */
+ if (unlikely(usr == TUNNEL_PROTOCOL))
+ tipc_node_tnl_init(n, bearer_id, skb);
+
/* Locate link endpoint that should handle packet */
l = n->links[bearer_id].link;
if (unlikely(!l))
if (!tipc_node_filter_skb(n, l, hdr))
goto unlock;
- if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+ if (unlikely(usr == LINK_PROTOCOL))
tipc_bclink_sync_state(n, hdr);
/* Release acked broadcast messages */
struct tipc_media_addr *maddr);
bool tipc_node_update_dest(struct tipc_node *n, struct tipc_bearer *bearer,
struct tipc_media_addr *maddr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_link_down(struct tipc_node *n_ptr, int bearer_id);