hlist: drop the node parameter from iterators
net/batman-adv/send.c

/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
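
/* Typical call pattern (as used by the rebroadcast loop further down in
 * this file): callers clone or copy the skb they want to keep, then hand
 * the clone over, e.g.
 *
 *	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *	if (skb1)
 *		batadv_send_skb_packet(skb1, hard_iface,
 *				       batadv_broadcast_addr);
 *
 * The skb passed in is always consumed: freed here on error, handed to
 * dev_queue_xmit() otherwise.
 */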

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns true on success, false otherwise.
 */
bool batadv_send_skb_to_orig(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;

	/* batadv_find_router() increases neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return false;

	/* route it */
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	batadv_neigh_node_free_ref(neigh_node);

	return true;
}
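
/* Illustrative caller sketch (hypothetical, for clarity): a forwarding
 * path would typically do
 *
 *	if (!batadv_send_skb_to_orig(skb, orig_node, recv_if))
 *		goto drop;
 *
 * If no router is found the function returns false and the skb is NOT
 * consumed, so the caller remains responsible for freeing it. On success
 * the skb has been handed to batadv_send_skb_packet() and must not be
 * touched again.
 */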

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface is activated here to close the race between the
	 * moment the interface is activated in hardif_activate_interface(),
	 * where the originator mac is set, and outdated packets (especially
	 * ones with uninitialized mac addresses) still sitting in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

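/* Enqueue a prepared forw_packet on the broadcast list and arm its
 * delayed work; send_time is a relative delay in jiffies (the
 * rebroadcast path below passes msecs_to_jiffies(5)).
 */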
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
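
/* Illustrative caller sketch (hypothetical, for clarity): since the
 * function works on a private copy of the skb, a transmit path can queue
 * the broadcast and then release its own reference, e.g.
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) ==
 *	    NETDEV_TX_OK)
 *		consume_skb(skb);
 */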
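
/* Delayed-work handler behind _batadv_add_bcast_packet_to_list(): takes
 * itself off the broadcast queue, clones the stored skb once per hard
 * interface attached to the soft interface, and re-arms itself every 5ms
 * until the packet has been broadcast three times.
 */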
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
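
/* Delayed-work handler for scheduled OGMs: removes the forw_packet from
 * the OGM queue, hands it to the routing algorithm's emit callback and,
 * for our own OGMs, schedules the next one so the queue never runs dry.
 */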
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
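
/* Cancel and free every queued broadcast and OGM forwarding packet. If
 * hard_iface is given, only the packets bound to that interface are
 * purged; passing NULL flushes both queues completely.
 */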
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() takes the list lock
		 * to delete the item from the list, so the lock must be
		 * released before cancel_delayed_work_sync() - otherwise a
		 * running work item would deadlock against us
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() takes the list
		 * lock to delete the item from the list, so release it here
		 * as well before cancel_delayed_work_sync()
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}