net/mac80211/tx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 *
12 * Transmit and frame generation functions.
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/etherdevice.h>
19 #include <linux/bitmap.h>
20 #include <linux/rcupdate.h>
21 #include <net/net_namespace.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <net/cfg80211.h>
24 #include <net/mac80211.h>
25 #include <asm/unaligned.h>
26
27 #include "ieee80211_i.h"
28 #include "driver-ops.h"
29 #include "led.h"
30 #include "mesh.h"
31 #include "wep.h"
32 #include "wpa.h"
33 #include "wme.h"
34 #include "rate.h"
35
36 /* misc utils */
37
38 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
39 int next_frag_len)
40 {
41 int rate, mrate, erp, dur, i;
42 struct ieee80211_rate *txrate;
43 struct ieee80211_local *local = tx->local;
44 struct ieee80211_supported_band *sband;
45 struct ieee80211_hdr *hdr;
46 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
47
48 /* assume HW handles this */
49 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
50 return 0;
51
52 /* should not happen: a valid rate index must have been selected by now */
53 if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
54 return 0;
55
56 sband = local->hw.wiphy->bands[tx->channel->band];
57 txrate = &sband->bitrates[info->control.rates[0].idx];
58
59 erp = txrate->flags & IEEE80211_RATE_ERP_G;
60
61 /*
62 * data and mgmt (except PS Poll):
63 * - during CFP: 32768
64 * - during contention period:
65 * if addr1 is group address: 0
66 * if more fragments = 0 and addr1 is individual address: time to
67 * transmit one ACK plus SIFS
68 * if more fragments = 1 and addr1 is individual address: time to
69 * transmit next fragment plus 2 x ACK plus 3 x SIFS
70 *
71 * IEEE 802.11, 9.6:
72 * - control response frame (CTS or ACK) shall be transmitted using the
73 * same rate as the immediately previous frame in the frame exchange
74 * sequence, if this rate belongs to the PHY mandatory rates, or else
75 * at the highest possible rate belonging to the PHY rates in the
76 * BSSBasicRateSet
77 */
78 hdr = (struct ieee80211_hdr *)tx->skb->data;
79 if (ieee80211_is_ctl(hdr->frame_control)) {
80 /* TODO: These control frames are not currently sent by
81 * mac80211, but should they be implemented, this function
82 * needs to be updated to support duration field calculation.
83 *
84 * RTS: time needed to transmit pending data/mgmt frame plus
85 * one CTS frame plus one ACK frame plus 3 x SIFS
86 * CTS: duration of immediately previous RTS minus time
87 * required to transmit CTS and its SIFS
88 * ACK: 0 if immediately previous directed data/mgmt had
89 * more=0, with more=1 duration in ACK frame is duration
90 * from previous frame minus time needed to transmit ACK
91 * and its SIFS
92 * PS Poll: BIT(15) | BIT(14) | aid
93 */
94 return 0;
95 }
96
97 /* data/mgmt */
98 if (0 /* FIX: data/mgmt during CFP */)
99 return cpu_to_le16(32768);
100
101 if (group_addr) /* Group address as the destination - no ACK */
102 return 0;
103
104 /* Individual destination address:
105 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
106 * CTS and ACK frames shall be transmitted using the highest rate in
107 * basic rate set that is less than or equal to the rate of the
108 * immediately previous frame and that is using the same modulation
109 * (CCK or OFDM). If no basic rate set matches with these requirements,
110 * the highest mandatory rate of the PHY that is less than or equal to
111 * the rate of the previous frame is used.
112 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
113 */
114 rate = -1;
115 /* use lowest available if everything fails */
116 mrate = sband->bitrates[0].bitrate;
117 for (i = 0; i < sband->n_bitrates; i++) {
118 struct ieee80211_rate *r = &sband->bitrates[i];
119
120 if (r->bitrate > txrate->bitrate)
121 break;
122
123 if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
124 rate = r->bitrate;
125
126 switch (sband->band) {
127 case IEEE80211_BAND_2GHZ: {
128 u32 flag;
129 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
130 flag = IEEE80211_RATE_MANDATORY_G;
131 else
132 flag = IEEE80211_RATE_MANDATORY_B;
133 if (r->flags & flag)
134 mrate = r->bitrate;
135 break;
136 }
137 case IEEE80211_BAND_5GHZ:
138 if (r->flags & IEEE80211_RATE_MANDATORY_A)
139 mrate = r->bitrate;
140 break;
141 case IEEE80211_NUM_BANDS:
142 WARN_ON(1);
143 break;
144 }
145 }
146 if (rate == -1) {
147 /* No matching basic rate found; use highest suitable mandatory
148 * PHY rate */
149 rate = mrate;
150 }
151
152 /* Time needed to transmit ACK
153 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
154 * to closest integer */
155
156 dur = ieee80211_frame_duration(local, 10, rate, erp,
157 tx->sdata->vif.bss_conf.use_short_preamble);
158
159 if (next_frag_len) {
160 /* Frame is fragmented: duration increases with time needed to
161 * transmit next fragment plus ACK and 2 x SIFS. */
162 dur *= 2; /* ACK + SIFS */
163 /* next fragment */
164 dur += ieee80211_frame_duration(local, next_frag_len,
165 txrate->bitrate, erp,
166 tx->sdata->vif.bss_conf.use_short_preamble);
167 }
168
169 return cpu_to_le16(dur);
170 }
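/*
 * Worked example (a sketch, assuming classic DSSS long-preamble timing):
 * for a unicast data frame sent at 11 Mbps with only 1 and 2 Mbps in the
 * BSS basic rate set, the loop above selects 2 Mbps for the ACK, and
 * ieee80211_frame_duration() then yields roughly 192 us of PLCP
 * preamble/header + 112 bits / 2 Mbps + a 10 us SIFS, i.e. about 258 us,
 * which is what ends up in the frame's duration/ID field.
 */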
171
172 static inline int is_ieee80211_device(struct ieee80211_local *local,
173 struct net_device *dev)
174 {
175 return local == wdev_priv(dev->ieee80211_ptr);
176 }
177
178 /* tx handlers */
179 static ieee80211_tx_result debug_noinline
180 ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
181 {
182 struct ieee80211_local *local = tx->local;
183 struct ieee80211_if_managed *ifmgd;
184
185 /* driver doesn't support power save */
186 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
187 return TX_CONTINUE;
188
189 /* hardware does dynamic power save */
190 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
191 return TX_CONTINUE;
192
193 /* dynamic power save disabled */
194 if (local->hw.conf.dynamic_ps_timeout <= 0)
195 return TX_CONTINUE;
196
197 /* we are scanning, don't enable power save */
198 if (local->scanning)
199 return TX_CONTINUE;
200
201 if (!local->ps_sdata)
202 return TX_CONTINUE;
203
204 /* No point if we're going to suspend */
205 if (local->quiescing)
206 return TX_CONTINUE;
207
208 /* dynamic ps is supported only in managed mode */
209 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
210 return TX_CONTINUE;
211
212 ifmgd = &tx->sdata->u.mgd;
213
214 /*
215 * Don't wake up from power save if u-apsd is enabled, the voip ac has
216 * u-apsd enabled and the frame is in the voip class. This effectively
217 * means that even if all access categories have u-apsd enabled, in
218 * practice u-apsd is only used with the voip ac. This is a
219 * workaround for the case when received voip class packets do not
220 * have a correct qos tag for some reason, due to the network or the
221 * peer application.
222 *
223 * Note: local->uapsd_queues access is racy here. If the value is
224 * changed via debugfs, user needs to reassociate manually to have
225 * everything in sync.
226 */
227 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
228 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
229 && skb_get_queue_mapping(tx->skb) == 0)
230 return TX_CONTINUE;
231
232 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
233 ieee80211_stop_queues_by_reason(&local->hw,
234 IEEE80211_QUEUE_STOP_REASON_PS);
235 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
236 ieee80211_queue_work(&local->hw,
237 &local->dynamic_ps_disable_work);
238 }
239
240 /* Don't restart the timer if we're not associated */
241 if (!ifmgd->associated)
242 return TX_CONTINUE;
243
244 mod_timer(&local->dynamic_ps_timer, jiffies +
245 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
246
247 return TX_CONTINUE;
248 }
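/*
 * In short: when a frame is transmitted while the hardware is still in
 * power save, the TX queues are stopped and dynamic_ps_disable_work is
 * scheduled to bring the hardware out of PS first; if we are associated,
 * the dynamic PS timer is then re-armed so PS is re-entered after
 * dynamic_ps_timeout ms of TX inactivity.
 */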
249
250 static ieee80211_tx_result debug_noinline
251 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
252 {
253
254 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
255 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
256 u32 sta_flags;
257
258 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
259 return TX_CONTINUE;
260
261 if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
262 test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
263 !ieee80211_is_probe_req(hdr->frame_control) &&
264 !ieee80211_is_nullfunc(hdr->frame_control))
265 /*
266 * When software scanning only nullfunc frames (to notify
267 * the sleep state to the AP) and probe requests (for the
268 * active scan) are allowed, all other frames should not be
269 * sent and we should not get here, but if we do
270 * nonetheless, drop them to avoid sending them
271 * off-channel. See the link below and
272 * ieee80211_start_scan() for more.
273 *
274 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
275 */
276 return TX_DROP;
277
278 if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
279 return TX_CONTINUE;
280
281 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
282 return TX_CONTINUE;
283
284 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
285 return TX_CONTINUE;
286
287 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
288
289 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
290 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
291 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
292 ieee80211_is_data(hdr->frame_control))) {
293 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
294 printk(KERN_DEBUG "%s: dropped data frame to not "
295 "associated station %pM\n",
296 tx->sdata->name, hdr->addr1);
297 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
298 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
299 return TX_DROP;
300 }
301 } else {
302 if (unlikely(ieee80211_is_data(hdr->frame_control) &&
303 tx->local->num_sta == 0 &&
304 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
305 /*
306 * No associated STAs - no need to send multicast
307 * frames.
308 */
309 return TX_DROP;
310 }
311 return TX_CONTINUE;
312 }
313
314 return TX_CONTINUE;
315 }
316
317 /* This function is called whenever the AP is about to exceed the maximum limit
318 * of buffered frames for power saving STAs. This situation should not really
319 * happen often during normal operation, so dropping the oldest buffered packet
320 * from each queue should be OK to make some room for new frames. */
321 static void purge_old_ps_buffers(struct ieee80211_local *local)
322 {
323 int total = 0, purged = 0;
324 struct sk_buff *skb;
325 struct ieee80211_sub_if_data *sdata;
326 struct sta_info *sta;
327
328 /*
329 * virtual interfaces are protected by RCU
330 */
331 rcu_read_lock();
332
333 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
334 struct ieee80211_if_ap *ap;
335 if (sdata->vif.type != NL80211_IFTYPE_AP)
336 continue;
337 ap = &sdata->u.ap;
338 skb = skb_dequeue(&ap->ps_bc_buf);
339 if (skb) {
340 purged++;
341 dev_kfree_skb(skb);
342 }
343 total += skb_queue_len(&ap->ps_bc_buf);
344 }
345
346 list_for_each_entry_rcu(sta, &local->sta_list, list) {
347 skb = skb_dequeue(&sta->ps_tx_buf);
348 if (skb) {
349 purged++;
350 dev_kfree_skb(skb);
351 }
352 total += skb_queue_len(&sta->ps_tx_buf);
353 }
354
355 rcu_read_unlock();
356
357 local->total_ps_buffered = total;
358 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
359 wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
360 purged);
361 #endif
362 }
363
364 static ieee80211_tx_result
365 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
366 {
367 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
369
370 /*
371 * broadcast/multicast frame
372 *
373 * If any of the associated stations is in power save mode,
374 * the frame is buffered to be sent after DTIM beacon frame.
375 * This is done either by the hardware or us.
376 */
377
378 /* powersaving STAs only in AP/VLAN mode */
379 if (!tx->sdata->bss)
380 return TX_CONTINUE;
381
382 /* no buffering for ordered frames */
383 if (ieee80211_has_order(hdr->frame_control))
384 return TX_CONTINUE;
385
386 /* no stations in PS mode */
387 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
388 return TX_CONTINUE;
389
390 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
391
392 /* device releases frame after DTIM beacon */
393 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
394 return TX_CONTINUE;
395
396 /* buffered in mac80211 */
397 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
398 purge_old_ps_buffers(tx->local);
399
400 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
401 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
402 if (net_ratelimit())
403 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
404 tx->sdata->name);
405 #endif
406 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
407 } else
408 tx->local->total_ps_buffered++;
409
410 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
411
412 return TX_QUEUED;
413 }
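/*
 * Note: frames buffered here in ps_bc_buf (at most AP_MAX_BC_BUFFER per
 * BSS) are released by the buffered-multicast code right after the DTIM
 * beacon; when the hardware does its own broadcast PS buffering, only
 * the SEND_AFTER_DTIM flag is set and the frame is passed through
 * unchanged.
 */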
414
415 static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
416 struct sk_buff *skb)
417 {
418 if (!ieee80211_is_mgmt(fc))
419 return 0;
420
421 if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
422 return 0;
423
424 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
425 skb->data))
426 return 0;
427
428 return 1;
429 }
430
431 static ieee80211_tx_result
432 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
433 {
434 struct sta_info *sta = tx->sta;
435 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
436 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
437 struct ieee80211_local *local = tx->local;
438 u32 staflags;
439
440 if (unlikely(!sta ||
441 ieee80211_is_probe_resp(hdr->frame_control) ||
442 ieee80211_is_auth(hdr->frame_control) ||
443 ieee80211_is_assoc_resp(hdr->frame_control) ||
444 ieee80211_is_reassoc_resp(hdr->frame_control)))
445 return TX_CONTINUE;
446
447 staflags = get_sta_flags(sta);
448
449 if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
450 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
451 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
452 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
453 "before %d)\n",
454 sta->sta.addr, sta->sta.aid,
455 skb_queue_len(&sta->ps_tx_buf));
456 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
457 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
458 purge_old_ps_buffers(tx->local);
459 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
460 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
461 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
462 if (net_ratelimit()) {
463 printk(KERN_DEBUG "%s: STA %pM TX "
464 "buffer full - dropping oldest frame\n",
465 tx->sdata->name, sta->sta.addr);
466 }
467 #endif
468 dev_kfree_skb(old);
469 } else
470 tx->local->total_ps_buffered++;
471
472 /*
473 * Queue frame to be sent after STA wakes up/polls,
474 * but don't set the TIM bit if the driver is blocking
475 * wakeup or poll response transmissions anyway.
476 */
477 if (skb_queue_empty(&sta->ps_tx_buf) &&
478 !(staflags & WLAN_STA_PS_DRIVER))
479 sta_info_set_tim_bit(sta);
480
481 info->control.jiffies = jiffies;
482 info->control.vif = &tx->sdata->vif;
483 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
484 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
485
486 if (!timer_pending(&local->sta_cleanup))
487 mod_timer(&local->sta_cleanup,
488 round_jiffies(jiffies +
489 STA_INFO_CLEANUP_INTERVAL));
490
491 return TX_QUEUED;
492 }
493 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
494 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
495 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
496 "set -> send frame\n", tx->sdata->name,
497 sta->sta.addr);
498 }
499 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
500
501 return TX_CONTINUE;
502 }
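/*
 * Frames parked in ps_tx_buf above carry
 * IEEE80211_TX_INTFL_NEED_TXPROCESSING, so when the station wakes up
 * (or sends a PS-Poll) they are re-run through the full TX handler
 * chain rather than being handed to the driver as-is.
 */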
503
504 static ieee80211_tx_result debug_noinline
505 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
506 {
507 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
508 return TX_CONTINUE;
509
510 if (tx->flags & IEEE80211_TX_UNICAST)
511 return ieee80211_tx_h_unicast_ps_buf(tx);
512 else
513 return ieee80211_tx_h_multicast_ps_buf(tx);
514 }
515
516 static ieee80211_tx_result debug_noinline
517 ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
518 {
519 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
520
521 if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
522 tx->sdata->control_port_no_encrypt))
523 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
524
525 return TX_CONTINUE;
526 }
527
528 static ieee80211_tx_result debug_noinline
529 ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
530 {
531 struct ieee80211_key *key = NULL;
532 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
533 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
534
535 if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
536 tx->key = NULL;
537 else if (tx->sta && (key = rcu_dereference(tx->sta->ptk)))
538 tx->key = key;
539 else if (ieee80211_is_mgmt(hdr->frame_control) &&
540 is_multicast_ether_addr(hdr->addr1) &&
541 ieee80211_is_robust_mgmt_frame(hdr) &&
542 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
543 tx->key = key;
544 else if (is_multicast_ether_addr(hdr->addr1) &&
545 (key = rcu_dereference(tx->sdata->default_multicast_key)))
546 tx->key = key;
547 else if (!is_multicast_ether_addr(hdr->addr1) &&
548 (key = rcu_dereference(tx->sdata->default_unicast_key)))
549 tx->key = key;
550 else if (tx->sdata->drop_unencrypted &&
551 (tx->skb->protocol != tx->sdata->control_port_protocol) &&
552 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
553 (!ieee80211_is_robust_mgmt_frame(hdr) ||
554 (ieee80211_is_action(hdr->frame_control) &&
555 tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
556 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
557 return TX_DROP;
558 } else
559 tx->key = NULL;
560
561 if (tx->key) {
562 bool skip_hw = false;
563
564 tx->key->tx_rx_count++;
565 /* TODO: add threshold stuff again */
566
567 switch (tx->key->conf.cipher) {
568 case WLAN_CIPHER_SUITE_WEP40:
569 case WLAN_CIPHER_SUITE_WEP104:
570 if (ieee80211_is_auth(hdr->frame_control))
571 break;
572 case WLAN_CIPHER_SUITE_TKIP:
573 if (!ieee80211_is_data_present(hdr->frame_control))
574 tx->key = NULL;
575 break;
576 case WLAN_CIPHER_SUITE_CCMP:
577 if (!ieee80211_is_data_present(hdr->frame_control) &&
578 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
579 tx->skb))
580 tx->key = NULL;
581 else
582 skip_hw = (tx->key->conf.flags &
583 IEEE80211_KEY_FLAG_SW_MGMT) &&
584 ieee80211_is_mgmt(hdr->frame_control);
585 break;
586 case WLAN_CIPHER_SUITE_AES_CMAC:
587 if (!ieee80211_is_mgmt(hdr->frame_control))
588 tx->key = NULL;
589 break;
590 }
591
592 if (!skip_hw && tx->key &&
593 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
594 info->control.hw_key = &tx->key->conf;
595 }
596
597 return TX_CONTINUE;
598 }
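/*
 * Key selection order used above: the station's pairwise key (PTK) wins,
 * then the default management group key for robust multicast management
 * frames, then the default multicast or unicast key depending on the
 * destination address; if none applies and drop_unencrypted is set, the
 * frame is dropped unless it is exempt (e.g. a control-port/EAPOL frame
 * or an injected frame).
 */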
599
600 static ieee80211_tx_result debug_noinline
601 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
602 {
603 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
604 struct ieee80211_hdr *hdr = (void *)tx->skb->data;
605 struct ieee80211_supported_band *sband;
606 struct ieee80211_rate *rate;
607 int i;
608 u32 len;
609 bool inval = false, rts = false, short_preamble = false;
610 struct ieee80211_tx_rate_control txrc;
611 u32 sta_flags;
612
613 memset(&txrc, 0, sizeof(txrc));
614
615 sband = tx->local->hw.wiphy->bands[tx->channel->band];
616
617 len = min_t(u32, tx->skb->len + FCS_LEN,
618 tx->local->hw.wiphy->frag_threshold);
619
620 /* set up the tx rate control struct we give the RC algo */
621 txrc.hw = local_to_hw(tx->local);
622 txrc.sband = sband;
623 txrc.bss_conf = &tx->sdata->vif.bss_conf;
624 txrc.skb = tx->skb;
625 txrc.reported_rate.idx = -1;
626 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
627 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
628 txrc.max_rate_idx = -1;
629 else
630 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
631 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
632 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
633
634 /* set up RTS protection if desired */
635 if (len > tx->local->hw.wiphy->rts_threshold) {
636 txrc.rts = rts = true;
637 }
638
639 /*
640 * Use short preamble if the BSS can handle it, but not for
641 * management frames unless we know the receiver can handle
642 * that -- the management frame might be to a station that
643 * just wants a probe response.
644 */
645 if (tx->sdata->vif.bss_conf.use_short_preamble &&
646 (ieee80211_is_data(hdr->frame_control) ||
647 (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
648 txrc.short_preamble = short_preamble = true;
649
650 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
651
652 /*
653 * Let's not bother rate control if we're associated and cannot
654 * talk to the sta. This should not happen.
655 */
656 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
657 (sta_flags & WLAN_STA_ASSOC) &&
658 !rate_usable_index_exists(sband, &tx->sta->sta),
659 "%s: Dropped data frame as no usable bitrate found while "
660 "scanning and associated. Target station: "
661 "%pM on %d GHz band\n",
662 tx->sdata->name, hdr->addr1,
663 tx->channel->band ? 5 : 2))
664 return TX_DROP;
665
666 /*
667 * If we're associated with the sta at this point we know we can at
668 * least send the frame at the lowest bit rate.
669 */
670 rate_control_get_rate(tx->sdata, tx->sta, &txrc);
671
672 if (unlikely(info->control.rates[0].idx < 0))
673 return TX_DROP;
674
675 if (txrc.reported_rate.idx < 0) {
676 txrc.reported_rate = info->control.rates[0];
677 if (tx->sta && ieee80211_is_data(hdr->frame_control))
678 tx->sta->last_tx_rate = txrc.reported_rate;
679 } else if (tx->sta)
680 tx->sta->last_tx_rate = txrc.reported_rate;
681
682 if (unlikely(!info->control.rates[0].count))
683 info->control.rates[0].count = 1;
684
685 if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
686 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
687 info->control.rates[0].count = 1;
688
689 if (is_multicast_ether_addr(hdr->addr1)) {
690 /*
691 * XXX: verify the rate is in the basic rateset
692 */
693 return TX_CONTINUE;
694 }
695
696 /*
697 * set up the RTS/CTS rate as the fastest basic rate
698 * that is not faster than the data rate
699 *
700 * XXX: Should this check all retry rates?
701 */
702 if (!(info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
703 s8 baserate = 0;
704
705 rate = &sband->bitrates[info->control.rates[0].idx];
706
707 for (i = 0; i < sband->n_bitrates; i++) {
708 /* must be a basic rate */
709 if (!(tx->sdata->vif.bss_conf.basic_rates & BIT(i)))
710 continue;
711 /* must not be faster than the data rate */
712 if (sband->bitrates[i].bitrate > rate->bitrate)
713 continue;
714 /* maximum */
715 if (sband->bitrates[baserate].bitrate <
716 sband->bitrates[i].bitrate)
717 baserate = i;
718 }
719
720 info->control.rts_cts_rate_idx = baserate;
721 }
722
723 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
724 /*
725 * make sure there's no valid rate following
726 * an invalid one, just in case drivers don't
727 * take the API seriously to stop at -1.
728 */
729 if (inval) {
730 info->control.rates[i].idx = -1;
731 continue;
732 }
733 if (info->control.rates[i].idx < 0) {
734 inval = true;
735 continue;
736 }
737
738 /*
739 * For now assume MCS is already set up correctly, this
740 * needs to be fixed.
741 */
742 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) {
743 WARN_ON(info->control.rates[i].idx > 76);
744 continue;
745 }
746
747 /* set up RTS protection if desired */
748 if (rts)
749 info->control.rates[i].flags |=
750 IEEE80211_TX_RC_USE_RTS_CTS;
751
752 /* RC is busted */
753 if (WARN_ON_ONCE(info->control.rates[i].idx >=
754 sband->n_bitrates)) {
755 info->control.rates[i].idx = -1;
756 continue;
757 }
758
759 rate = &sband->bitrates[info->control.rates[i].idx];
760
761 /* set up short preamble */
762 if (short_preamble &&
763 rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
764 info->control.rates[i].flags |=
765 IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
766
767 /* set up G protection */
768 if (!rts && tx->sdata->vif.bss_conf.use_cts_prot &&
769 rate->flags & IEEE80211_RATE_ERP_G)
770 info->control.rates[i].flags |=
771 IEEE80211_TX_RC_USE_CTS_PROTECT;
772 }
773
774 return TX_CONTINUE;
775 }
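/*
 * Example of the RTS/CTS rate selection above: with a basic rate set of
 * 6/12/24 Mbps and a data rate of 54 Mbps, rts_cts_rate_idx ends up
 * pointing at 24 Mbps -- the fastest basic rate that does not exceed
 * the data rate -- falling back to rate index 0 if no basic rate
 * qualifies.
 */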
776
777 static ieee80211_tx_result debug_noinline
778 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
779 {
780 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
781 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
782 u16 *seq;
783 u8 *qc;
784 int tid;
785
786 /*
787 * Packet injection may want to control the sequence
788 * number, if we have no matching interface then we
789 * neither assign one ourselves nor ask the driver to.
790 */
791 if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
792 return TX_CONTINUE;
793
794 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
795 return TX_CONTINUE;
796
797 if (ieee80211_hdrlen(hdr->frame_control) < 24)
798 return TX_CONTINUE;
799
800 /*
801 * Anything but QoS data that has a sequence number field
802 * (is long enough) gets a sequence number from the global
803 * counter.
804 */
805 if (!ieee80211_is_data_qos(hdr->frame_control)) {
806 /* driver should assign sequence number */
807 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
808 /* for pure STA mode without beacons, we can do it */
809 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
810 tx->sdata->sequence_number += 0x10;
811 return TX_CONTINUE;
812 }
813
814 /*
815 * This should be true for injected/management frames only, for
816 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
817 * above since they are not QoS-data frames.
818 */
819 if (!tx->sta)
820 return TX_CONTINUE;
821
822 /* include per-STA, per-TID sequence counter */
823
824 qc = ieee80211_get_qos_ctl(hdr);
825 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
826 seq = &tx->sta->tid_seq[tid];
827
828 hdr->seq_ctrl = cpu_to_le16(*seq);
829
830 /* Increase the sequence number. */
831 *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
832
833 return TX_CONTINUE;
834 }
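/*
 * The sequence number lives in the top 12 bits of the 16-bit sequence
 * control field (the low 4 bits are the fragment number), hence the
 * increments in steps of 0x10: a counter runs 0x0000, 0x0010, ...,
 * 0xfff0 and then wraps. QoS data frames use a per-STA, per-TID counter;
 * everything else gets the per-interface counter and is flagged with
 * IEEE80211_TX_CTL_ASSIGN_SEQ so the driver may overwrite it.
 */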
835
836 static int ieee80211_fragment(struct ieee80211_local *local,
837 struct sk_buff *skb, int hdrlen,
838 int frag_threshold)
839 {
840 struct sk_buff *tail = skb, *tmp;
841 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
842 int pos = hdrlen + per_fragm;
843 int rem = skb->len - hdrlen - per_fragm;
844
845 if (WARN_ON(rem < 0))
846 return -EINVAL;
847
848 while (rem) {
849 int fraglen = per_fragm;
850
851 if (fraglen > rem)
852 fraglen = rem;
853 rem -= fraglen;
854 tmp = dev_alloc_skb(local->tx_headroom +
855 frag_threshold +
856 IEEE80211_ENCRYPT_HEADROOM +
857 IEEE80211_ENCRYPT_TAILROOM);
858 if (!tmp)
859 return -ENOMEM;
860 tail->next = tmp;
861 tail = tmp;
862 skb_reserve(tmp, local->tx_headroom +
863 IEEE80211_ENCRYPT_HEADROOM);
864 /* copy control information */
865 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
866 skb_copy_queue_mapping(tmp, skb);
867 tmp->priority = skb->priority;
868 tmp->dev = skb->dev;
869
870 /* copy header and data */
871 memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
872 memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
873
874 pos += fraglen;
875 }
876
877 skb->len = hdrlen + per_fragm;
878 return 0;
879 }
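/*
 * Example (hypothetical numbers): with a fragmentation threshold of 256
 * bytes and a 24-byte header, each fragment carries 256 - 24 - 4 = 228
 * bytes of payload, so a 1000-byte MSDU is split into five fragments
 * chained through skb->next, the last one holding the 88-byte remainder.
 */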
880
881 static ieee80211_tx_result debug_noinline
882 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
883 {
884 struct sk_buff *skb = tx->skb;
885 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
886 struct ieee80211_hdr *hdr = (void *)skb->data;
887 int frag_threshold = tx->local->hw.wiphy->frag_threshold;
888 int hdrlen;
889 int fragnum;
890
891 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
892 return TX_CONTINUE;
893
894 /*
895 * Warn when submitting a fragmented A-MPDU frame and drop it.
896 * This scenario is handled in ieee80211_tx_prepare but extra
897 * caution taken here as fragmented ampdu may cause Tx stop.
898 */
899 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
900 return TX_DROP;
901
902 hdrlen = ieee80211_hdrlen(hdr->frame_control);
903
904 /* internal error, why is TX_FRAGMENTED set? */
905 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
906 return TX_DROP;
907
908 /*
909 * Now fragment the frame. This will allocate all the fragments and
910 * chain them (using skb as the first fragment) to skb->next.
911 * During transmission, we will remove the successfully transmitted
912 * fragments from this list. When the low-level driver rejects one
913 * of the fragments then we will simply pretend to accept the skb
914 * but store it away as pending.
915 */
916 if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
917 return TX_DROP;
918
919 /* update duration/seq/flags of fragments */
920 fragnum = 0;
921 do {
922 int next_len;
923 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
924
925 hdr = (void *)skb->data;
926 info = IEEE80211_SKB_CB(skb);
927
928 if (skb->next) {
929 hdr->frame_control |= morefrags;
930 next_len = skb->next->len;
931 /*
932 * No multi-rate retries for fragmented frames, that
933 * would completely throw off the NAV at other STAs.
934 */
935 info->control.rates[1].idx = -1;
936 info->control.rates[2].idx = -1;
937 info->control.rates[3].idx = -1;
938 info->control.rates[4].idx = -1;
939 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
940 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
941 } else {
942 hdr->frame_control &= ~morefrags;
943 next_len = 0;
944 }
945 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
946 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
947 fragnum++;
948 } while ((skb = skb->next));
949
950 return TX_CONTINUE;
951 }
952
953 static ieee80211_tx_result debug_noinline
954 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
955 {
956 struct sk_buff *skb = tx->skb;
957
958 if (!tx->sta)
959 return TX_CONTINUE;
960
961 tx->sta->tx_packets++;
962 do {
963 tx->sta->tx_fragments++;
964 tx->sta->tx_bytes += skb->len;
965 } while ((skb = skb->next));
966
967 return TX_CONTINUE;
968 }
969
970 static ieee80211_tx_result debug_noinline
971 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
972 {
973 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
974
975 if (!tx->key)
976 return TX_CONTINUE;
977
978 switch (tx->key->conf.cipher) {
979 case WLAN_CIPHER_SUITE_WEP40:
980 case WLAN_CIPHER_SUITE_WEP104:
981 return ieee80211_crypto_wep_encrypt(tx);
982 case WLAN_CIPHER_SUITE_TKIP:
983 return ieee80211_crypto_tkip_encrypt(tx);
984 case WLAN_CIPHER_SUITE_CCMP:
985 return ieee80211_crypto_ccmp_encrypt(tx);
986 case WLAN_CIPHER_SUITE_AES_CMAC:
987 return ieee80211_crypto_aes_cmac_encrypt(tx);
988 default:
989 /* handle hw-only algorithm */
990 if (info->control.hw_key) {
991 ieee80211_tx_set_protected(tx);
992 return TX_CONTINUE;
993 }
994 break;
995
996 }
997
998 return TX_DROP;
999 }
1000
1001 static ieee80211_tx_result debug_noinline
1002 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
1003 {
1004 struct sk_buff *skb = tx->skb;
1005 struct ieee80211_hdr *hdr;
1006 int next_len;
1007 bool group_addr;
1008
1009 do {
1010 hdr = (void *) skb->data;
1011 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
1012 break; /* must not overwrite AID */
1013 next_len = skb->next ? skb->next->len : 0;
1014 group_addr = is_multicast_ether_addr(hdr->addr1);
1015
1016 hdr->duration_id =
1017 ieee80211_duration(tx, group_addr, next_len);
1018 } while ((skb = skb->next));
1019
1020 return TX_CONTINUE;
1021 }
1022
1023 /* actual transmit path */
1024
1025 /*
1026 * deal with packet injection down monitor interface
1027 * with Radiotap Header -- only called for monitor mode interface
1028 */
1029 static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1030 struct sk_buff *skb)
1031 {
1032 /*
1033 * this is the moment to interpret and discard the radiotap header that
1034 * must be at the start of the packet injected in Monitor mode
1035 *
1036 * Need to take some care with endian-ness since radiotap
1037 * args are little-endian
1038 */
1039
1040 struct ieee80211_radiotap_iterator iterator;
1041 struct ieee80211_radiotap_header *rthdr =
1042 (struct ieee80211_radiotap_header *) skb->data;
1043 struct ieee80211_supported_band *sband;
1044 bool hw_frag;
1045 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1046 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1047 NULL);
1048
1049 sband = tx->local->hw.wiphy->bands[tx->channel->band];
1050
1051 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1052 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1053
1054 /* packet is fragmented in HW if we have a non-NULL driver callback */
1055 hw_frag = (tx->local->ops->set_frag_threshold != NULL);
1056
1057 /*
1058 * for every radiotap entry that is present
1059 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1060 * entries present, or -EINVAL on error)
1061 */
1062
1063 while (!ret) {
1064 ret = ieee80211_radiotap_iterator_next(&iterator);
1065
1066 if (ret)
1067 continue;
1068
1069 /* see if this argument is something we can use */
1070 switch (iterator.this_arg_index) {
1071 /*
1072 * You must take care when dereferencing iterator.this_arg
1073 * for multibyte types... the pointer is not aligned. Use
1074 * get_unaligned((type *)iterator.this_arg) to dereference
1075 * iterator.this_arg for type "type" safely on all arches.
1076 */
1077 case IEEE80211_RADIOTAP_FLAGS:
1078 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1079 /*
1080 * this indicates that the skb we have been
1081 * handed has the 32-bit FCS CRC at the end...
1082 * we should react to that by snipping it off
1083 * because it will be recomputed and added
1084 * on transmission
1085 */
1086 if (skb->len < (iterator._max_length + FCS_LEN))
1087 return false;
1088
1089 skb_trim(skb, skb->len - FCS_LEN);
1090 }
1091 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1092 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1093 if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
1094 !hw_frag)
1095 tx->flags |= IEEE80211_TX_FRAGMENTED;
1096 break;
1097
1098 /*
1099 * Please update the file
1100 * Documentation/networking/mac80211-injection.txt
1101 * when parsing new fields here.
1102 */
1103
1104 default:
1105 break;
1106 }
1107 }
1108
1109 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1110 return false;
1111
1112 /*
1113 * remove the radiotap header
1114 * iterator->_max_length was sanity-checked against
1115 * skb->len by iterator init
1116 */
1117 skb_pull(skb, iterator._max_length);
1118
1119 return true;
1120 }
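/*
 * Only the radiotap FLAGS field is acted upon here: FCS means the
 * trailing 4 bytes are stripped (the hardware recomputes the FCS on
 * transmit), WEP re-enables encryption for the injected frame, and FRAG
 * requests software fragmentation when the driver does not fragment in
 * hardware. All other radiotap fields are skipped by the iterator.
 */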
1121
1122 static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1123 struct sk_buff *skb,
1124 struct ieee80211_tx_info *info,
1125 struct tid_ampdu_tx *tid_tx,
1126 int tid)
1127 {
1128 bool queued = false;
1129
1130 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1131 info->flags |= IEEE80211_TX_CTL_AMPDU;
1132 } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
1133 /*
1134 * nothing -- this aggregation session is being started
1135 * but that might still fail with the driver
1136 */
1137 } else {
1138 spin_lock(&tx->sta->lock);
1139 /*
1140 * Need to re-check now, because we may get here
1141 *
1142 * 1) in the window during which the setup is actually
1143 * already done, but not marked yet because not all
1144 * packets are spliced over to the driver pending
1145 * queue yet -- if this happened we acquire the lock
1146 * either before or after the splice happens, but
1147 * need to recheck which of these cases happened.
1148 *
1149 * 2) during session teardown, if the OPERATIONAL bit
1150 * was cleared due to the teardown but the pointer
1151 * hasn't been assigned NULL yet (or we loaded it
1152 * before it was assigned) -- in this case it may
1153 * now be NULL which means we should just let the
1154 * packet pass through because splicing the frames
1155 * back is already done.
1156 */
1157 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1158
1159 if (!tid_tx) {
1160 /* do nothing, let packet pass through */
1161 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1162 info->flags |= IEEE80211_TX_CTL_AMPDU;
1163 } else {
1164 queued = true;
1165 info->control.vif = &tx->sdata->vif;
1166 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1167 __skb_queue_tail(&tid_tx->pending, skb);
1168 }
1169 spin_unlock(&tx->sta->lock);
1170 }
1171
1172 return queued;
1173 }
1174
1175 /*
1176 * initialises @tx
1177 */
1178 static ieee80211_tx_result
1179 ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1180 struct ieee80211_tx_data *tx,
1181 struct sk_buff *skb)
1182 {
1183 struct ieee80211_local *local = sdata->local;
1184 struct ieee80211_hdr *hdr;
1185 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1186 int hdrlen, tid;
1187 u8 *qc;
1188
1189 memset(tx, 0, sizeof(*tx));
1190 tx->skb = skb;
1191 tx->local = local;
1192 tx->sdata = sdata;
1193 tx->channel = local->hw.conf.channel;
1194 /*
1195 * Set this flag (used below to indicate "automatic fragmentation"),
1196 * it will be cleared/left by radiotap as desired.
1197 * Only valid when fragmentation is done by the stack.
1198 */
1199 if (!local->ops->set_frag_threshold)
1200 tx->flags |= IEEE80211_TX_FRAGMENTED;
1201
1202 /* process and remove the injection radiotap header */
1203 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1204 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1205 return TX_DROP;
1206
1207 /*
1208 * __ieee80211_parse_tx_radiotap has now removed
1209 * the radiotap header that was present and pre-filled
1210 * 'tx' with tx control information.
1211 */
1212 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1213 }
1214
1215 /*
1216 * If this flag is set to true anywhere, and we get here,
1217 * we are doing the needed processing, so remove the flag
1218 * now.
1219 */
1220 info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1221
1222 hdr = (struct ieee80211_hdr *) skb->data;
1223
1224 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1225 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1226 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1227 return TX_DROP;
1228 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1229 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1230 }
1231 if (!tx->sta)
1232 tx->sta = sta_info_get(sdata, hdr->addr1);
1233
1234 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1235 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
1236 struct tid_ampdu_tx *tid_tx;
1237
1238 qc = ieee80211_get_qos_ctl(hdr);
1239 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1240
1241 tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
1242 if (tid_tx) {
1243 bool queued;
1244
1245 queued = ieee80211_tx_prep_agg(tx, skb, info,
1246 tid_tx, tid);
1247
1248 if (unlikely(queued))
1249 return TX_QUEUED;
1250 }
1251 }
1252
1253 if (is_multicast_ether_addr(hdr->addr1)) {
1254 tx->flags &= ~IEEE80211_TX_UNICAST;
1255 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1256 } else {
1257 tx->flags |= IEEE80211_TX_UNICAST;
1258 if (unlikely(local->wifi_wme_noack_test))
1259 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1260 else
1261 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1262 }
1263
1264 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1265 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1266 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
1267 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1268 tx->flags |= IEEE80211_TX_FRAGMENTED;
1269 else
1270 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1271 }
1272
1273 if (!tx->sta)
1274 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1275 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1276 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1277
1278 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1279 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1280 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1281 tx->ethertype = (pos[0] << 8) | pos[1];
1282 }
1283 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1284
1285 return TX_CONTINUE;
1286 }
1287
1288 /*
1289 * Returns false if the frame couldn't be transmitted but was queued instead.
1290 */
1291 static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
1292 struct sta_info *sta, bool txpending)
1293 {
1294 struct sk_buff *skb = *skbp, *next;
1295 struct ieee80211_tx_info *info;
1296 struct ieee80211_sub_if_data *sdata;
1297 unsigned long flags;
1298 int len;
1299 bool fragm = false;
1300
1301 while (skb) {
1302 int q = skb_get_queue_mapping(skb);
1303 __le16 fc;
1304
1305 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1306 if (local->queue_stop_reasons[q] ||
1307 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1308 /*
1309 * Since queue is stopped, queue up frames for later
1310 * transmission from the tx-pending tasklet when the
1311 * queue is woken again.
1312 */
1313
1314 do {
1315 next = skb->next;
1316 skb->next = NULL;
1317 /*
1318 * NB: If txpending is true, next must already
1319 * be NULL since we must've gone through this
1320 * loop before already; therefore we can just
1321 * queue the frame to the head without worrying
1322 * about reordering of fragments.
1323 */
1324 if (unlikely(txpending))
1325 __skb_queue_head(&local->pending[q],
1326 skb);
1327 else
1328 __skb_queue_tail(&local->pending[q],
1329 skb);
1330 } while ((skb = next));
1331
1332 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1333 flags);
1334 return false;
1335 }
1336 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1337
1338 info = IEEE80211_SKB_CB(skb);
1339
1340 if (fragm)
1341 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
1342 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1343
1344 next = skb->next;
1345 len = skb->len;
1346
1347 if (next)
1348 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
1349
1350 sdata = vif_to_sdata(info->control.vif);
1351
1352 switch (sdata->vif.type) {
1353 case NL80211_IFTYPE_MONITOR:
1354 info->control.vif = NULL;
1355 break;
1356 case NL80211_IFTYPE_AP_VLAN:
1357 info->control.vif = &container_of(sdata->bss,
1358 struct ieee80211_sub_if_data, u.ap)->vif;
1359 break;
1360 default:
1361 /* keep */
1362 break;
1363 }
1364
1365 if (sta && sta->uploaded)
1366 info->control.sta = &sta->sta;
1367 else
1368 info->control.sta = NULL;
1369
1370 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
1371 drv_tx(local, skb);
1372
1373 ieee80211_tpt_led_trig_tx(local, fc, len);
1374 *skbp = skb = next;
1375 ieee80211_led_tx(local, 1);
1376 fragm = true;
1377 }
1378
1379 return true;
1380 }
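/*
 * If a queue is stopped (or already has pending frames), the remaining
 * fragments are stashed on local->pending[q] and the function reports
 * false; the TX-pending tasklet re-submits them with txpending = true
 * once the queue is woken, which is why requeued frames go to the head
 * of the list in that case.
 */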
1381
1382 /*
1383 * Invoke TX handlers, return 0 on success and non-zero if the
1384 * frame was dropped or queued.
1385 */
1386 static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1387 {
1388 struct sk_buff *skb = tx->skb;
1389 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1390 ieee80211_tx_result res = TX_DROP;
1391
1392 #define CALL_TXH(txh) \
1393 do { \
1394 res = txh(tx); \
1395 if (res != TX_CONTINUE) \
1396 goto txh_done; \
1397 } while (0)
1398
1399 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1400 CALL_TXH(ieee80211_tx_h_check_assoc);
1401 CALL_TXH(ieee80211_tx_h_ps_buf);
1402 CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
1403 CALL_TXH(ieee80211_tx_h_select_key);
1404 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1405 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1406
1407 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
1408 goto txh_done;
1409
1410 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1411 CALL_TXH(ieee80211_tx_h_sequence);
1412 CALL_TXH(ieee80211_tx_h_fragment);
1413 /* handlers after fragment must be aware of tx info fragmentation! */
1414 CALL_TXH(ieee80211_tx_h_stats);
1415 CALL_TXH(ieee80211_tx_h_encrypt);
1416 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1417 CALL_TXH(ieee80211_tx_h_calculate_duration);
1418 #undef CALL_TXH
1419
1420 txh_done:
1421 if (unlikely(res == TX_DROP)) {
1422 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1423 while (skb) {
1424 struct sk_buff *next;
1425
1426 next = skb->next;
1427 dev_kfree_skb(skb);
1428 skb = next;
1429 }
1430 return -1;
1431 } else if (unlikely(res == TX_QUEUED)) {
1432 I802_DEBUG_INC(tx->local->tx_handlers_queued);
1433 return -1;
1434 }
1435
1436 return 0;
1437 }
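/*
 * Return convention: 0 means the skb chain is still owned by the caller
 * and ready for __ieee80211_tx(); non-zero means the handlers either
 * dropped the frame (and freed it) or queued it elsewhere (PS buffers,
 * aggregation pending queue), so the caller must not touch it again.
 */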
1438
1439 /*
1440 * Returns false if the frame couldn't be transmitted but was queued instead.
1441 */
1442 static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1443 struct sk_buff *skb, bool txpending)
1444 {
1445 struct ieee80211_local *local = sdata->local;
1446 struct ieee80211_tx_data tx;
1447 ieee80211_tx_result res_prepare;
1448 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1449 u16 queue;
1450 bool result = true;
1451
1452 queue = skb_get_queue_mapping(skb);
1453
1454 if (unlikely(skb->len < 10)) {
1455 dev_kfree_skb(skb);
1456 return true;
1457 }
1458
1459 rcu_read_lock();
1460
1461 /* initialises tx */
1462 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
1463
1464 if (unlikely(res_prepare == TX_DROP)) {
1465 dev_kfree_skb(skb);
1466 goto out;
1467 } else if (unlikely(res_prepare == TX_QUEUED)) {
1468 goto out;
1469 }
1470
1471 tx.channel = local->hw.conf.channel;
1472 info->band = tx.channel->band;
1473
1474 if (!invoke_tx_handlers(&tx))
1475 result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
1476 out:
1477 rcu_read_unlock();
1478 return result;
1479 }
1480
1481 /* device xmit handlers */
1482
1483 static int ieee80211_skb_resize(struct ieee80211_local *local,
1484 struct sk_buff *skb,
1485 int head_need, bool may_encrypt)
1486 {
1487 int tail_need = 0;
1488
1489 /*
1490 * This could be optimised, devices that do full hardware
1491 * crypto (including TKIP MMIC) need no tailroom... But we
1492 * have no drivers for such devices currently.
1493 */
1494 if (may_encrypt) {
1495 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1496 tail_need -= skb_tailroom(skb);
1497 tail_need = max_t(int, tail_need, 0);
1498 }
1499
1500 if (head_need || tail_need) {
1501 /* Sorry. Can't account for this any more */
1502 skb_orphan(skb);
1503 }
1504
1505 if (skb_cloned(skb))
1506 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1507 else if (head_need || tail_need)
1508 I802_DEBUG_INC(local->tx_expand_skb_head);
1509 else
1510 return 0;
1511
1512 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1513 wiphy_debug(local->hw.wiphy,
1514 "failed to reallocate TX buffer\n");
1515 return -ENOMEM;
1516 }
1517
1518 /* update truesize too */
1519 skb->truesize += head_need + tail_need;
1520
1521 return 0;
1522 }
1523
1524 static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1525 struct sk_buff *skb)
1526 {
1527 struct ieee80211_local *local = sdata->local;
1528 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1529 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1530 struct ieee80211_sub_if_data *tmp_sdata;
1531 int headroom;
1532 bool may_encrypt;
1533
1534 rcu_read_lock();
1535
1536 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1537 int hdrlen;
1538 u16 len_rthdr;
1539
1540 info->flags |= IEEE80211_TX_CTL_INJECTED |
1541 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1542
1543 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1544 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1545 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1546
1547 /* check the header is complete in the frame */
1548 if (likely(skb->len >= len_rthdr + hdrlen)) {
1549 /*
1550 * We process outgoing injected frames that have a
1551 * local address we handle as though they are our
1552 * own frames.
1553 * This code here isn't entirely correct, the local
1554 * MAC address is not necessarily enough to find
1555 * the interface to use; for that proper VLAN/WDS
1556 * support we will need a different mechanism.
1557 */
1558
1559 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1560 list) {
1561 if (!ieee80211_sdata_running(tmp_sdata))
1562 continue;
1563 if (tmp_sdata->vif.type ==
1564 NL80211_IFTYPE_MONITOR ||
1565 tmp_sdata->vif.type ==
1566 NL80211_IFTYPE_AP_VLAN ||
1567 tmp_sdata->vif.type ==
1568 NL80211_IFTYPE_WDS)
1569 continue;
1570 if (compare_ether_addr(tmp_sdata->vif.addr,
1571 hdr->addr2) == 0) {
1572 sdata = tmp_sdata;
1573 break;
1574 }
1575 }
1576 }
1577 }
1578
1579 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1580
1581 headroom = local->tx_headroom;
1582 if (may_encrypt)
1583 headroom += IEEE80211_ENCRYPT_HEADROOM;
1584 headroom -= skb_headroom(skb);
1585 headroom = max_t(int, 0, headroom);
1586
1587 if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
1588 dev_kfree_skb(skb);
1589 rcu_read_unlock();
1590 return;
1591 }
1592
1593 hdr = (struct ieee80211_hdr *) skb->data;
1594 info->control.vif = &sdata->vif;
1595
1596 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1597 ieee80211_is_data(hdr->frame_control) &&
1598 !is_multicast_ether_addr(hdr->addr1))
1599 if (mesh_nexthop_lookup(skb, sdata)) {
1600 /* skb queued: don't free */
1601 rcu_read_unlock();
1602 return;
1603 }
1604
1605 ieee80211_set_qos_hdr(local, skb);
1606 ieee80211_tx(sdata, skb, false);
1607 rcu_read_unlock();
1608 }
1609
1610 netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1611 struct net_device *dev)
1612 {
1613 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1614 struct ieee80211_channel *chan = local->hw.conf.channel;
1615 struct ieee80211_radiotap_header *prthdr =
1616 (struct ieee80211_radiotap_header *)skb->data;
1617 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1618 u16 len_rthdr;
1619
1620 /*
1621 * Frame injection is not allowed if beaconing is not allowed
1622 * or if we need radar detection. Beaconing is usually not allowed when
1623 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1624 * Passive scan is also used in world regulatory domains where
1625 * your country is not known and as such it should be treated as
1626 * NO TX unless the channel is explicitly allowed in which case
1627 * your current regulatory domain would not have the passive scan
1628 * flag.
1629 *
1630 * Since AP mode uses monitor interfaces to inject/TX management
1631 * frames we can make AP mode the exception to this rule once it
1632 * supports radar detection as its implementation can deal with
1633 * radar detection by itself. We can do that later by adding a
1634 * monitor flag interfaces used for AP support.
1635 */
1636 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1637 IEEE80211_CHAN_PASSIVE_SCAN)))
1638 goto fail;
1639
1640 /* check for not even having the fixed radiotap header part */
1641 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1642 goto fail; /* too short to be possibly valid */
1643
1644 /* is it a header version we can trust to find length from? */
1645 if (unlikely(prthdr->it_version))
1646 goto fail; /* only version 0 is supported */
1647
1648 /* then there must be a radiotap header with a length we can use */
1649 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1650
1651 /* does the skb contain enough to deliver on the alleged length? */
1652 if (unlikely(skb->len < len_rthdr))
1653 goto fail; /* skb too short for claimed rt header extent */
1654
1655 /*
1656 * fix up the pointers accounting for the radiotap
1657 * header still being in there. We are being given
1658 * a precooked IEEE80211 header so no need for
1659 * normal processing
1660 */
1661 skb_set_mac_header(skb, len_rthdr);
1662 /*
1663 * these are just fixed to the end of the rt area since we
1664 * don't have any better information and at this point, nobody cares
1665 */
1666 skb_set_network_header(skb, len_rthdr);
1667 skb_set_transport_header(skb, len_rthdr);
1668
1669 memset(info, 0, sizeof(*info));
1670
1671 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1672
1673 /* pass the radiotap header up to xmit */
1674 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1675 return NETDEV_TX_OK;
1676
1677 fail:
1678 dev_kfree_skb(skb);
1679 return NETDEV_TX_OK; /* meaning, we dealt with the skb */
1680 }
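/*
 * For reference, the smallest valid injected frame starts with the
 * 8-byte fixed radiotap header (it_version 0, it_pad 0, it_len 8,
 * it_present 0) followed by the 802.11 header and payload; frames
 * shorter than the fixed radiotap header, with a non-zero radiotap
 * version, or shorter than the length the radiotap header claims are
 * rejected above.
 */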
1681
1682 /**
1683 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
1684 * subinterfaces (wlan#, WDS, and VLAN interfaces)
1685 * @skb: packet to be sent
1686 * @dev: incoming interface
1687 *
1688 * Returns: NETDEV_TX_OK on success (and frees skb in this case) or
1689 * NETDEV_TX_BUSY on failure (skb will not be freed, and the caller is
1690 * responsible for either retrying later or freeing skb).
1691 *
1692 * This function takes in an Ethernet header and encapsulates it with a
1693 * suitable IEEE 802.11 header based on the interface the packet is
1694 * coming in on. The encapsulated packet is then handed to the
1695 * low-level driver for transmission.
1696 */
1697 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1698 struct net_device *dev)
1699 {
1700 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1701 struct ieee80211_local *local = sdata->local;
1702 struct ieee80211_tx_info *info;
1703 int ret = NETDEV_TX_BUSY, head_need;
1704 u16 ethertype, hdrlen, meshhdrlen = 0;
1705 __le16 fc;
1706 struct ieee80211_hdr hdr;
1707 struct ieee80211s_hdr mesh_hdr __maybe_unused;
1708 struct mesh_path __maybe_unused *mppath = NULL;
1709 const u8 *encaps_data;
1710 int encaps_len, skip_header_bytes;
1711 int nh_pos, h_pos;
1712 struct sta_info *sta = NULL;
1713 u32 sta_flags = 0;
1714 struct sk_buff *tmp_skb;
1715
1716 if (unlikely(skb->len < ETH_HLEN)) {
1717 ret = NETDEV_TX_OK;
1718 goto fail;
1719 }
1720
1721 /* convert Ethernet header to proper 802.11 header (based on
1722 * operation mode) */
1723 ethertype = (skb->data[12] << 8) | skb->data[13];
1724 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1725
1726 switch (sdata->vif.type) {
1727 case NL80211_IFTYPE_AP_VLAN:
1728 rcu_read_lock();
1729 sta = rcu_dereference(sdata->u.vlan.sta);
1730 if (sta) {
1731 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1732 /* RA TA DA SA */
1733 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1734 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1735 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1736 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1737 hdrlen = 30;
1738 sta_flags = get_sta_flags(sta);
1739 }
1740 rcu_read_unlock();
1741 if (sta)
1742 break;
1743 /* fall through */
1744 case NL80211_IFTYPE_AP:
1745 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1746 /* DA BSSID SA */
1747 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1748 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1749 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1750 hdrlen = 24;
1751 break;
1752 case NL80211_IFTYPE_WDS:
1753 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1754 /* RA TA DA SA */
1755 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1756 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1757 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1758 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1759 hdrlen = 30;
1760 break;
1761 #ifdef CONFIG_MAC80211_MESH
1762 case NL80211_IFTYPE_MESH_POINT:
1763 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1764 /* Do not send frames with mesh_ttl == 0 */
1765 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1766 ret = NETDEV_TX_OK;
1767 goto fail;
1768 }
1769 if (!is_multicast_ether_addr(skb->data))
1770 mppath = mpp_path_lookup(skb->data, sdata);
1771
1772 /*
1773 * Use address extension if it is a packet from
1774 * another interface or if we know the destination
1775 * is being proxied by a portal (i.e. portal address
1776 * differs from proxied address)
1777 */
1778 if (compare_ether_addr(sdata->vif.addr,
1779 skb->data + ETH_ALEN) == 0 &&
1780 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
1781 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1782 skb->data, skb->data + ETH_ALEN);
1783 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1784 sdata, NULL, NULL);
1785 } else {
1786 int is_mesh_mcast = 1;
1787 const u8 *mesh_da;
1788
1789 rcu_read_lock();
1790 if (is_multicast_ether_addr(skb->data))
1791 /* DA TA mSA AE:SA */
1792 mesh_da = skb->data;
1793 else {
1794 static const u8 bcast[ETH_ALEN] =
1795 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1796 if (mppath) {
1797 /* RA TA mDA mSA AE:DA SA */
1798 mesh_da = mppath->mpp;
1799 is_mesh_mcast = 0;
1800 } else {
1801 /* DA TA mSA AE:SA */
1802 mesh_da = bcast;
1803 }
1804 }
1805 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1806 mesh_da, sdata->vif.addr);
1807 rcu_read_unlock();
1808 if (is_mesh_mcast)
1809 meshhdrlen =
1810 ieee80211_new_mesh_header(&mesh_hdr,
1811 sdata,
1812 skb->data + ETH_ALEN,
1813 NULL);
1814 else
1815 meshhdrlen =
1816 ieee80211_new_mesh_header(&mesh_hdr,
1817 sdata,
1818 skb->data,
1819 skb->data + ETH_ALEN);
1820
1821 }
1822 break;
1823 #endif
1824 case NL80211_IFTYPE_STATION:
1825 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1826 if (sdata->u.mgd.use_4addr &&
1827 cpu_to_be16(ethertype) != sdata->control_port_protocol) {
1828 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1829 /* RA TA DA SA */
1830 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1831 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1832 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1833 hdrlen = 30;
1834 } else {
1835 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1836 /* BSSID SA DA */
1837 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1838 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1839 hdrlen = 24;
1840 }
1841 break;
1842 case NL80211_IFTYPE_ADHOC:
1843 /* DA SA BSSID */
1844 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1845 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1846 memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
1847 hdrlen = 24;
1848 break;
1849 default:
1850 ret = NETDEV_TX_OK;
1851 goto fail;
1852 }
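/*
 * Summary of the mapping above (descriptive, mirrors the code):
 * - AP: From DS, addr1 = DA, addr2 = BSSID (vif.addr), addr3 = SA,
 *   24-byte header.
 * - STATION (3-addr): To DS, addr1 = BSSID, addr2 = SA, addr3 = DA,
 *   24-byte header.
 * - ADHOC: addr1 = DA, addr2 = SA, addr3 = IBSS BSSID, 24-byte header.
 * - WDS, AP_VLAN with a 4-addr station and 4-addr STATION: both To DS
 *   and From DS are set, addr4 = SA, 30-byte header.
 * For example, an Ethernet frame DA|SA|type sent on an AP interface
 * goes out as addr1 = DA, addr2 = vif.addr, addr3 = SA with From DS set.
 */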
1853
1854 /*
1855 * There's no need to try to look up the destination
1856 * if it is a multicast address (which can only happen
1857 * in AP mode)
1858 */
1859 if (!is_multicast_ether_addr(hdr.addr1)) {
1860 rcu_read_lock();
1861 sta = sta_info_get(sdata, hdr.addr1);
1862 if (sta)
1863 sta_flags = get_sta_flags(sta);
1864 rcu_read_unlock();
1865 }
1866
1867 /* if both the receiver and we are QoS-capable and the hardware has enough queues, use a QoS data frame */
1868 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
1869 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1870 hdrlen += 2;
1871 }
1872
1873 /*
1874 * Drop unicast frames to unauthorised stations unless they are
1875 * EAPOL frames from the local station.
1876 */
1877 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1878 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1879 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1880 !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
1881 compare_ether_addr(sdata->vif.addr,
1882 skb->data + ETH_ALEN) == 0))) {
1883 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1884 if (net_ratelimit())
1885 printk(KERN_DEBUG "%s: dropped frame to %pM"
1886 " (unauthorized port)\n", dev->name,
1887 hdr.addr1);
1888 #endif
1889
1890 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1891
1892 ret = NETDEV_TX_OK;
1893 goto fail;
1894 }
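/*
 * The exception above lets locally originated control-port frames
 * through before the peer is authorized: control_port_protocol is
 * normally EAPOL (ethertype 0x888e), so the 802.1X/4-way handshake
 * can complete while the port is still blocked for other traffic.
 */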
1895
1896 /*
1897 * If the skb is shared we need to obtain our own copy.
1898 */
1899 if (skb_shared(skb)) {
1900 tmp_skb = skb;
1901 skb = skb_clone(skb, GFP_ATOMIC);
1902 kfree_skb(tmp_skb);
1903
1904 if (!skb) {
1905 ret = NETDEV_TX_OK;
1906 goto fail;
1907 }
1908 }
1909
1910 hdr.frame_control = fc;
1911 hdr.duration_id = 0;
1912 hdr.seq_ctrl = 0;
1913
1914 skip_header_bytes = ETH_HLEN;
1915 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
1916 encaps_data = bridge_tunnel_header;
1917 encaps_len = sizeof(bridge_tunnel_header);
1918 skip_header_bytes -= 2;
1919 } else if (ethertype >= 0x600) {
1920 encaps_data = rfc1042_header;
1921 encaps_len = sizeof(rfc1042_header);
1922 skip_header_bytes -= 2;
1923 } else {
1924 encaps_data = NULL;
1925 encaps_len = 0;
1926 }
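/*
 * Ethertypes >= 0x600 get an RFC 1042 LLC/SNAP header
 * (aa aa 03 00 00 00), while AARP and IPX use the bridge-tunnel
 * OUI (aa aa 03 00 00 f8); in both cases skip_header_bytes is
 * reduced by 2 so the original ethertype stays in the payload,
 * e.g. IPv4 ends up as aa aa 03 00 00 00 08 00. Values below
 * 0x600 are 802.3 length fields, so no SNAP header is added.
 */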
1927
1928 nh_pos = skb_network_header(skb) - skb->data;
1929 h_pos = skb_transport_header(skb) - skb->data;
1930
1931 skb_pull(skb, skip_header_bytes);
1932 nh_pos -= skip_header_bytes;
1933 h_pos -= skip_header_bytes;
1934
1935 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1936
1937 /*
1938 * We may need to modify the skb header, which requires a writable
1939 * copy with sufficient headroom. head_need above only covers the
1940 * space needed right away (802.11 header, encapsulation and mesh
1941 * header); it does not include headroom that may be needed later,
1942 * e.g. for encryption or by the driver.
1943 *
1944 * If we have to reallocate anyway (not enough headroom, or the skb
1945 * is cloned), grow the headroom by everything we may ever need so
1946 * that the frame need not be reallocated again down the TX path.
1947 */
1948
1949 if (head_need > 0 || skb_cloned(skb)) {
1950 head_need += IEEE80211_ENCRYPT_HEADROOM;
1951 head_need += local->tx_headroom;
1952 head_need = max_t(int, 0, head_need);
1953 if (ieee80211_skb_resize(local, skb, head_need, true))
1954 goto fail;
1955 }
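/*
 * For example (illustrative numbers): a QoS data frame in station
 * mode has hdrlen = 26, an IPv4 payload gets encaps_len = 6 and
 * meshhdrlen = 0; with only 16 bytes of headroom left in the skb,
 * head_need = 26 + 6 + 0 - 16 = 16 > 0, so the skb is expanded by
 * those 16 bytes plus IEEE80211_ENCRYPT_HEADROOM and tx_headroom.
 */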
1956
1957 if (encaps_data) {
1958 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
1959 nh_pos += encaps_len;
1960 h_pos += encaps_len;
1961 }
1962
1963 #ifdef CONFIG_MAC80211_MESH
1964 if (meshhdrlen > 0) {
1965 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
1966 nh_pos += meshhdrlen;
1967 h_pos += meshhdrlen;
1968 }
1969 #endif
1970
1971 if (ieee80211_is_data_qos(fc)) {
1972 __le16 *qos_control;
1973
1974 qos_control = (__le16*) skb_push(skb, 2);
1975 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
1976 /*
1977 * Maybe we could actually set some fields here, for now just
1978 * initialise to zero to indicate no special operation.
1979 */
1980 *qos_control = 0;
1981 } else
1982 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
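/*
 * In the QoS case above, the two bytes pushed first end up as the
 * last two bytes of the 802.11 header (the QoS Control field);
 * leaving them zero means TID 0 (best effort), normal ack policy
 * and no A-MSDU.
 */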
1983
1984 nh_pos += hdrlen;
1985 h_pos += hdrlen;
1986
1987 dev->stats.tx_packets++;
1988 dev->stats.tx_bytes += skb->len;
1989
1990 /* Update skb pointers to various headers since this modified frame
1991 * is going to go through Linux networking code that may potentially
1992 * need things like pointer to IP header. */
1993 skb_set_mac_header(skb, 0);
1994 skb_set_network_header(skb, nh_pos);
1995 skb_set_transport_header(skb, h_pos);
1996
1997 info = IEEE80211_SKB_CB(skb);
1998 memset(info, 0, sizeof(*info));
1999
2000 dev->trans_start = jiffies;
2001 ieee80211_xmit(sdata, skb);
2002
2003 return NETDEV_TX_OK;
2004
2005 fail:
2006 if (ret == NETDEV_TX_OK)
2007 dev_kfree_skb(skb);
2008
2009 return ret;
2010 }
2011
2012
2013 /*
2014 * ieee80211_clear_tx_pending must not be called from a context where
2015 * it is possible that packets could come in again.
2016 */
2017 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
2018 {
2019 int i;
2020
2021 for (i = 0; i < local->hw.queues; i++)
2022 skb_queue_purge(&local->pending[i]);
2023 }
2024
2025 /*
2026 * Returns false if the frame couldn't be transmitted but was queued instead,
2027 * which in this case means re-queued -- take this as an indication to stop sending
2028 * more pending frames.
2029 */
2030 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2031 struct sk_buff *skb)
2032 {
2033 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2034 struct ieee80211_sub_if_data *sdata;
2035 struct sta_info *sta;
2036 struct ieee80211_hdr *hdr;
2037 bool result;
2038
2039 sdata = vif_to_sdata(info->control.vif);
2040
2041 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2042 result = ieee80211_tx(sdata, skb, true);
2043 } else {
2044 hdr = (struct ieee80211_hdr *)skb->data;
2045 sta = sta_info_get(sdata, hdr->addr1);
2046
2047 result = __ieee80211_tx(local, &skb, sta, true);
2048 }
2049
2050 return result;
2051 }
2052
2053 /*
2054 * Transmit all pending packets. Called from tasklet.
2055 */
2056 void ieee80211_tx_pending(unsigned long data)
2057 {
2058 struct ieee80211_local *local = (struct ieee80211_local *)data;
2059 struct ieee80211_sub_if_data *sdata;
2060 unsigned long flags;
2061 int i;
2062 bool txok;
2063
2064 rcu_read_lock();
2065
2066 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
2067 for (i = 0; i < local->hw.queues; i++) {
2068 /*
2069 * If the queue is stopped for a reason other than pending
2070 * frames, or we have no pending frames, proceed to the next queue.
2071 */
2072 if (local->queue_stop_reasons[i] ||
2073 skb_queue_empty(&local->pending[i]))
2074 continue;
2075
2076 while (!skb_queue_empty(&local->pending[i])) {
2077 struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
2078 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2079
2080 if (WARN_ON(!info->control.vif)) {
2081 kfree_skb(skb);
2082 continue;
2083 }
2084
2085 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
2086 flags);
2087
2088 txok = ieee80211_tx_pending_skb(local, skb);
2089 spin_lock_irqsave(&local->queue_stop_reason_lock,
2090 flags);
2091 if (!txok)
2092 break;
2093 }
2094
2095 if (skb_queue_empty(&local->pending[i]))
2096 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2097 netif_wake_subqueue(sdata->dev, i);
2098 }
2099 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2100
2101 rcu_read_unlock();
2102 }
2103
2104 /* functions for drivers to get certain frames */
2105
2106 static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2107 struct sk_buff *skb,
2108 struct beacon_data *beacon)
2109 {
2110 u8 *pos, *tim;
2111 int aid0 = 0;
2112 int i, have_bits = 0, n1, n2;
2113
2114 /* Generate bitmap for TIM only if there are any STAs in power save
2115 * mode. */
2116 if (atomic_read(&bss->num_sta_ps) > 0)
2117 /* in the hope that this is faster than
2118 * checking byte-for-byte */
2119 have_bits = !bitmap_empty((unsigned long*)bss->tim,
2120 IEEE80211_MAX_AID+1);
2121
2122 if (bss->dtim_count == 0)
2123 bss->dtim_count = beacon->dtim_period - 1;
2124 else
2125 bss->dtim_count--;
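/*
 * dtim_count counts down from dtim_period - 1 to 0; the beacon that
 * carries DTIM Count 0 is the DTIM beacon. For example, with
 * dtim_period == 2 successive beacons carry 1, 0, 1, 0, ... and
 * buffered broadcast/multicast traffic is released after each
 * count-0 beacon (see dtim_bc_mc below).
 */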
2126
2127 tim = pos = (u8 *) skb_put(skb, 6);
2128 *pos++ = WLAN_EID_TIM;
2129 *pos++ = 4;
2130 *pos++ = bss->dtim_count;
2131 *pos++ = beacon->dtim_period;
2132
2133 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
2134 aid0 = 1;
2135
2136 bss->dtim_bc_mc = aid0 == 1;
2137
2138 if (have_bits) {
2139 /* Find the largest even number N1 so that bits numbered 1 through
2140 * (N1 x 8) - 1 in the bitmap are 0 and the smallest number N2 so
2141 * that bits (N2 + 1) x 8 through 2007 are 0. */
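/*
 * For example, if only AID 33 has traffic buffered, bit 33 lives in
 * octet 4 of the bitmap, so n1 = 4 & 0xfe = 4 and n2 = 4. The
 * Bitmap Control byte becomes n1 | aid0 (n1 is even, so its low bit
 * is free for the multicast indicator while bits 1-7 encode the
 * offset N1/2), one octet of partial bitmap is copied, and the
 * element length is n2 - n1 + 4 = 4.
 */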
2142 n1 = 0;
2143 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
2144 if (bss->tim[i]) {
2145 n1 = i & 0xfe;
2146 break;
2147 }
2148 }
2149 n2 = n1;
2150 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
2151 if (bss->tim[i]) {
2152 n2 = i;
2153 break;
2154 }
2155 }
2156
2157 /* Bitmap control */
2158 *pos++ = n1 | aid0;
2159 /* Part Virt Bitmap */
2160 memcpy(pos, bss->tim + n1, n2 - n1 + 1);
2161
2162 tim[1] = n2 - n1 + 4;
2163 skb_put(skb, n2 - n1);
2164 } else {
2165 *pos++ = aid0; /* Bitmap control */
2166 *pos++ = 0; /* Part Virt Bitmap */
2167 }
2168 }
2169
2170 struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2171 struct ieee80211_vif *vif,
2172 u16 *tim_offset, u16 *tim_length)
2173 {
2174 struct ieee80211_local *local = hw_to_local(hw);
2175 struct sk_buff *skb = NULL;
2176 struct ieee80211_tx_info *info;
2177 struct ieee80211_sub_if_data *sdata = NULL;
2178 struct ieee80211_if_ap *ap = NULL;
2179 struct beacon_data *beacon;
2180 struct ieee80211_supported_band *sband;
2181 enum ieee80211_band band = local->hw.conf.channel->band;
2182 struct ieee80211_tx_rate_control txrc;
2183
2184 sband = local->hw.wiphy->bands[band];
2185
2186 rcu_read_lock();
2187
2188 sdata = vif_to_sdata(vif);
2189
2190 if (!ieee80211_sdata_running(sdata))
2191 goto out;
2192
2193 if (tim_offset)
2194 *tim_offset = 0;
2195 if (tim_length)
2196 *tim_length = 0;
2197
2198 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2199 ap = &sdata->u.ap;
2200 beacon = rcu_dereference(ap->beacon);
2201 if (beacon) {
2202 /*
2203 * headroom, head length,
2204 * tail length and maximum TIM length
2205 */
2206 skb = dev_alloc_skb(local->tx_headroom +
2207 beacon->head_len +
2208 beacon->tail_len + 256);
2209 if (!skb)
2210 goto out;
2211
2212 skb_reserve(skb, local->tx_headroom);
2213 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2214 beacon->head_len);
2215
2216 /*
2217 * Not very nice, but we want to allow the driver to call
2218 * ieee80211_beacon_get() as a response to the set_tim()
2219 * callback. That, however, is already invoked under the
2220 * sta_lock to guarantee consistent and race-free update
2221 * of the tim bitmap in mac80211 and the driver.
2222 */
2223 if (local->tim_in_locked_section) {
2224 ieee80211_beacon_add_tim(ap, skb, beacon);
2225 } else {
2226 unsigned long flags;
2227
2228 spin_lock_irqsave(&local->sta_lock, flags);
2229 ieee80211_beacon_add_tim(ap, skb, beacon);
2230 spin_unlock_irqrestore(&local->sta_lock, flags);
2231 }
2232
2233 if (tim_offset)
2234 *tim_offset = beacon->head_len;
2235 if (tim_length)
2236 *tim_length = skb->len - beacon->head_len;
2237
2238 if (beacon->tail)
2239 memcpy(skb_put(skb, beacon->tail_len),
2240 beacon->tail, beacon->tail_len);
2241 } else
2242 goto out;
2243 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2244 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2245 struct ieee80211_hdr *hdr;
2246 struct sk_buff *presp = rcu_dereference(ifibss->presp);
2247
2248 if (!presp)
2249 goto out;
2250
2251 skb = skb_copy(presp, GFP_ATOMIC);
2252 if (!skb)
2253 goto out;
2254
2255 hdr = (struct ieee80211_hdr *) skb->data;
2256 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2257 IEEE80211_STYPE_BEACON);
2258 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2259 struct ieee80211_mgmt *mgmt;
2260 u8 *pos;
2261
2262 #ifdef CONFIG_MAC80211_MESH
2263 if (!sdata->u.mesh.mesh_id_len)
2264 goto out;
2265 #endif
2266
2267 /* headroom plus a conservative estimate of the mesh beacon length */
2268 skb = dev_alloc_skb(local->tx_headroom + 400 +
2269 sdata->u.mesh.vendor_ie_len);
2270 if (!skb)
2271 goto out;
2272
2273 skb_reserve(skb, local->hw.extra_tx_headroom);
2274 mgmt = (struct ieee80211_mgmt *)
2275 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2276 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2277 mgmt->frame_control =
2278 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2279 memset(mgmt->da, 0xff, ETH_ALEN);
2280 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2281 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2282 mgmt->u.beacon.beacon_int =
2283 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2284 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
2285
2286 pos = skb_put(skb, 2);
2287 *pos++ = WLAN_EID_SSID;
2288 *pos++ = 0x0;
2289
2290 mesh_mgmt_ies_add(skb, sdata);
2291 } else {
2292 WARN_ON(1);
2293 goto out;
2294 }
2295
2296 info = IEEE80211_SKB_CB(skb);
2297
2298 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2299 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2300 info->band = band;
2301
2302 memset(&txrc, 0, sizeof(txrc));
2303 txrc.hw = hw;
2304 txrc.sband = sband;
2305 txrc.bss_conf = &sdata->vif.bss_conf;
2306 txrc.skb = skb;
2307 txrc.reported_rate.idx = -1;
2308 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2309 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2310 txrc.max_rate_idx = -1;
2311 else
2312 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
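/*
 * For example, if the mask covers every rate in the band
 * ((1 << n_bitrates) - 1), no cap is applied (max_rate_idx = -1);
 * a mask of 0x00f restricts rate control to the four lowest rates
 * and yields max_rate_idx = fls(0xf) - 1 = 3.
 */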
2313 txrc.bss = true;
2314 rate_control_get_rate(sdata, NULL, &txrc);
2315
2316 info->control.vif = vif;
2317
2318 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
2319 IEEE80211_TX_CTL_ASSIGN_SEQ |
2320 IEEE80211_TX_CTL_FIRST_FRAGMENT;
2321 out:
2322 rcu_read_unlock();
2323 return skb;
2324 }
2325 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2326
2327 struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2328 struct ieee80211_vif *vif)
2329 {
2330 struct ieee80211_sub_if_data *sdata;
2331 struct ieee80211_if_managed *ifmgd;
2332 struct ieee80211_pspoll *pspoll;
2333 struct ieee80211_local *local;
2334 struct sk_buff *skb;
2335
2336 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2337 return NULL;
2338
2339 sdata = vif_to_sdata(vif);
2340 ifmgd = &sdata->u.mgd;
2341 local = sdata->local;
2342
2343 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2344 if (!skb) {
2345 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2346 "pspoll template\n", sdata->name);
2347 return NULL;
2348 }
2349 skb_reserve(skb, local->hw.extra_tx_headroom);
2350
2351 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2352 memset(pspoll, 0, sizeof(*pspoll));
2353 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2354 IEEE80211_STYPE_PSPOLL);
2355 pspoll->aid = cpu_to_le16(ifmgd->aid);
2356
2357 /* the AID in a PS-Poll has its two most significant bits set to 1 */
2358 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
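/* For example, AID 5 becomes 0xc005, i.e. bytes 05 c0 on the air. */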
2359
2360 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2361 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2362
2363 return skb;
2364 }
2365 EXPORT_SYMBOL(ieee80211_pspoll_get);
2366
2367 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2368 struct ieee80211_vif *vif)
2369 {
2370 struct ieee80211_hdr_3addr *nullfunc;
2371 struct ieee80211_sub_if_data *sdata;
2372 struct ieee80211_if_managed *ifmgd;
2373 struct ieee80211_local *local;
2374 struct sk_buff *skb;
2375
2376 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2377 return NULL;
2378
2379 sdata = vif_to_sdata(vif);
2380 ifmgd = &sdata->u.mgd;
2381 local = sdata->local;
2382
2383 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2384 if (!skb) {
2385 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2386 "template\n", sdata->name);
2387 return NULL;
2388 }
2389 skb_reserve(skb, local->hw.extra_tx_headroom);
2390
2391 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2392 sizeof(*nullfunc));
2393 memset(nullfunc, 0, sizeof(*nullfunc));
2394 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2395 IEEE80211_STYPE_NULLFUNC |
2396 IEEE80211_FCTL_TODS);
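/*
 * With the usual ieee80211.h values this is a Data/Null frame with
 * To DS set, i.e. frame_control 0x0148 (bytes 48 01 on the air).
 */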
2397 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2398 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2399 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2400
2401 return skb;
2402 }
2403 EXPORT_SYMBOL(ieee80211_nullfunc_get);
2404
2405 struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2406 struct ieee80211_vif *vif,
2407 const u8 *ssid, size_t ssid_len,
2408 const u8 *ie, size_t ie_len)
2409 {
2410 struct ieee80211_sub_if_data *sdata;
2411 struct ieee80211_local *local;
2412 struct ieee80211_hdr_3addr *hdr;
2413 struct sk_buff *skb;
2414 size_t ie_ssid_len;
2415 u8 *pos;
2416
2417 sdata = vif_to_sdata(vif);
2418 local = sdata->local;
2419 ie_ssid_len = 2 + ssid_len;
2420
2421 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2422 ie_ssid_len + ie_len);
2423 if (!skb) {
2424 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2425 "request template\n", sdata->name);
2426 return NULL;
2427 }
2428
2429 skb_reserve(skb, local->hw.extra_tx_headroom);
2430
2431 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2432 memset(hdr, 0, sizeof(*hdr));
2433 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2434 IEEE80211_STYPE_PROBE_REQ);
2435 memset(hdr->addr1, 0xff, ETH_ALEN);
2436 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2437 memset(hdr->addr3, 0xff, ETH_ALEN);
2438
2439 pos = skb_put(skb, ie_ssid_len);
2440 *pos++ = WLAN_EID_SSID;
2441 *pos++ = ssid_len;
2442 if (ssid)
2443 memcpy(pos, ssid, ssid_len);
2444 pos += ssid_len;
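/*
 * A zero ssid_len produces the wildcard SSID element (element ID 0,
 * length 0), which solicits probe responses from all BSSs.
 */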
2445
2446 if (ie) {
2447 pos = skb_put(skb, ie_len);
2448 memcpy(pos, ie, ie_len);
2449 }
2450
2451 return skb;
2452 }
2453 EXPORT_SYMBOL(ieee80211_probereq_get);
2454
2455 void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2456 const void *frame, size_t frame_len,
2457 const struct ieee80211_tx_info *frame_txctl,
2458 struct ieee80211_rts *rts)
2459 {
2460 const struct ieee80211_hdr *hdr = frame;
2461
2462 rts->frame_control =
2463 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
2464 rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
2465 frame_txctl);
2466 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
2467 memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
2468 }
2469 EXPORT_SYMBOL(ieee80211_rts_get);
2470
2471 void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2472 const void *frame, size_t frame_len,
2473 const struct ieee80211_tx_info *frame_txctl,
2474 struct ieee80211_cts *cts)
2475 {
2476 const struct ieee80211_hdr *hdr = frame;
2477
2478 cts->frame_control =
2479 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
2480 cts->duration = ieee80211_ctstoself_duration(hw, vif,
2481 frame_len, frame_txctl);
2482 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
2483 }
2484 EXPORT_SYMBOL(ieee80211_ctstoself_get);
2485
2486 struct sk_buff *
2487 ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2488 struct ieee80211_vif *vif)
2489 {
2490 struct ieee80211_local *local = hw_to_local(hw);
2491 struct sk_buff *skb = NULL;
2492 struct sta_info *sta;
2493 struct ieee80211_tx_data tx;
2494 struct ieee80211_sub_if_data *sdata;
2495 struct ieee80211_if_ap *bss = NULL;
2496 struct beacon_data *beacon;
2497 struct ieee80211_tx_info *info;
2498
2499 sdata = vif_to_sdata(vif);
2500 bss = &sdata->u.ap;
2501
2502 rcu_read_lock();
2503 beacon = rcu_dereference(bss->beacon);
2504
2505 if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
2506 goto out;
2507
2508 if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
2509 goto out; /* send buffered bc/mc only after DTIM beacon */
2510
2511 while (1) {
2512 skb = skb_dequeue(&bss->ps_bc_buf);
2513 if (!skb)
2514 goto out;
2515 local->total_ps_buffered--;
2516
2517 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
2518 struct ieee80211_hdr *hdr =
2519 (struct ieee80211_hdr *) skb->data;
2520 /* more buffered multicast/broadcast frames ==> set
2521 * MoreData flag in IEEE 802.11 header to inform PS
2522 * STAs */
2523 hdr->frame_control |=
2524 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2525 }
2526
2527 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2528 break;
2529 dev_kfree_skb_any(skb);
2530 }
2531
2532 info = IEEE80211_SKB_CB(skb);
2533
2534 sta = tx.sta;
2535 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2536 tx.channel = local->hw.conf.channel;
2537 info->band = tx.channel->band;
2538
2539 if (invoke_tx_handlers(&tx))
2540 skb = NULL;
2541 out:
2542 rcu_read_unlock();
2543
2544 return skb;
2545 }
2546 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2547
2548 void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2549 {
2550 skb_set_mac_header(skb, 0);
2551 skb_set_network_header(skb, 0);
2552 skb_set_transport_header(skb, 0);
2553
2554 /* send all internal mgmt frames on VO */
2555 skb_set_queue_mapping(skb, 0);
2556
2557 /*
2558 * The other path calling ieee80211_xmit is from the tasklet,
2559 * and while we can handle concurrent transmissions, the locking
2560 * requirements are that we never enter TX with bottom halves enabled.
2561 */
2562 local_bh_disable();
2563 ieee80211_xmit(sdata, skb);
2564 local_bh_enable();
2565 }