[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}

net/ieee80211/ieee80211_tx.c
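
The helpers named in the subject are thin wrappers around memcpy() over the
linear data area of an skb; a sketch of the inline definitions they get in
include/linux/skbuff.h:

    static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
                                                  void *to, const unsigned int len)
    {
            memcpy(to, skb->data, len);
    }

    static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
                                                         const int offset, void *to,
                                                         const unsigned int len)
    {
            memcpy(to, skb->data + offset, len);
    }

In this file they replace open-coded copies from skb->data; for example, the two
calls that save the destination and source MAC addresses in ieee80211_xmit()
below are equivalent to memcpy(dest, skb->data, ETH_ALEN) and
memcpy(src, skb->data + ETH_ALEN, ETH_ALEN).
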
/******************************************************************************

  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>

#include <net/ieee80211.h>

/*

802.11 Data Frame

      ,-------------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                  `----.----'
                                                               |
       .- 'Frame data' expands, if WEP enabled, to <-----------'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | Packet    |     |
      `-----|           |-----'
            `-----.-----'
                  |
       .- 'Encrypted Packet' expands to
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `----------------------------------------------------
Total: 8 non-data bytes

802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining packets are just data.

If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP),
so if you have 1500 bytes of payload with ieee->fts set to 500, without
encryption it will take 3 frames.  With WEP it will take 4 frames, as the
payload of each frame is reduced to 492 bytes.
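
Worked through with those numbers: without encryption each fragment carries
500 bytes, so 1500 bytes fit in 1500 / 500 = 3 fragments.  With WEP the
per-fragment payload drops to 500 - 8 = 492 bytes; three fragments cover
3 * 492 = 1476 bytes, so the remaining 24 bytes force a fourth fragment.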

 * SKB visualization
 *
 * ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | | |
 * 0         | v       1 | v | v           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *             SNAP HEADER
 *
 */

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int ieee80211_copy_snap(u8 * data, u16 h_proto)
{
        struct ieee80211_snap_hdr *snap;
        u8 *oui;

        snap = (struct ieee80211_snap_hdr *)data;
        snap->dsap = 0xaa;
        snap->ssap = 0xaa;
        snap->ctrl = 0x03;

        if (h_proto == 0x8137 || h_proto == 0x80f3)
                oui = P802_1H_OUI;
        else
                oui = RFC1042_OUI;
        snap->oui[0] = oui[0];
        snap->oui[1] = oui[1];
        snap->oui[2] = oui[2];

        *(u16 *) (data + SNAP_SIZE) = htons(h_proto);

        return SNAP_SIZE + sizeof(u16);
}
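
/*
 * Illustrative byte dump: for an IPv4 payload (h_proto == ETH_P_IP, 0x0800)
 * the eight bytes written above are AA AA 03 00 00 00 08 00, i.e. RFC 1042
 * encapsulation.  Only the Ethertypes 0x8137 (IPX) and 0x80f3 (AppleTalk
 * AARP) get the 802.1H bridge-tunnel OUI 00 00 F8 instead.
 */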

static int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
                                      struct sk_buff *frag, int hdr_len)
{
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
        int res;

        if (crypt == NULL)
                return -1;

        /* To encrypt, frame format is:
         * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
        atomic_inc(&crypt->refcnt);
        res = 0;
        if (crypt->ops && crypt->ops->encrypt_mpdu)
                res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

        atomic_dec(&crypt->refcnt);
        if (res < 0) {
                printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
                       ieee->dev->name, frag->len);
                ieee->ieee_stats.tx_discards++;
                return -1;
        }

        return 0;
}

void ieee80211_txb_free(struct ieee80211_txb *txb)
{
        int i;
        if (unlikely(!txb))
                return;
        for (i = 0; i < txb->nr_frags; i++)
                if (txb->fragments[i])
                        dev_kfree_skb_any(txb->fragments[i]);
        kfree(txb);
}

static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
                                                 int headroom, gfp_t gfp_mask)
{
        struct ieee80211_txb *txb;
        int i;
        txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
                      gfp_mask);
        if (!txb)
                return NULL;

        memset(txb, 0, sizeof(struct ieee80211_txb));
        txb->nr_frags = nr_frags;
        txb->frag_size = txb_size;

        for (i = 0; i < nr_frags; i++) {
                txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
                                                    gfp_mask);
                if (unlikely(!txb->fragments[i])) {
                        i--;
                        break;
                }
                skb_reserve(txb->fragments[i], headroom);
        }
        if (unlikely(i != nr_frags)) {
                while (i >= 0)
                        dev_kfree_skb_any(txb->fragments[i--]);
                kfree(txb);
                return NULL;
        }
        return txb;
}

static int ieee80211_classify(struct sk_buff *skb)
{
        struct ethhdr *eth;
        struct iphdr *ip;

        eth = (struct ethhdr *)skb->data;
        if (eth->h_proto != htons(ETH_P_IP))
                return 0;

        ip = ip_hdr(skb);
        switch (ip->tos & 0xfc) {
        case 0x20:
                return 2;
        case 0x40:
                return 1;
        case 0x60:
                return 3;
        case 0x80:
                return 4;
        case 0xa0:
                return 5;
        case 0xc0:
                return 6;
        case 0xe0:
                return 7;
        default:
                return 0;
        }
}
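
/*
 * For quick reference, the mapping above (IP TOS byte masked with 0xfc, i.e.
 * the DSCP bits, to the value stored in skb->priority and later used as the
 * QoS TID):
 *
 *   0x20 -> 2   0x40 -> 1   0x60 -> 3   0x80 -> 4
 *   0xa0 -> 5   0xc0 -> 6   0xe0 -> 7   anything else -> 0
 */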

/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee80211_device *ieee = netdev_priv(dev);
        struct ieee80211_txb *txb = NULL;
        struct ieee80211_hdr_3addrqos *frag_hdr;
        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
            rts_required;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
        int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
        int bytes, fc, hdr_len;
        struct sk_buff *skb_frag;
        struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
                .duration_id = 0,
                .seq_ctl = 0,
                .qos_ctl = 0
        };
        u8 dest[ETH_ALEN], src[ETH_ALEN];
        struct ieee80211_crypt_data *crypt;
        int priority = skb->priority;
        int snapped = 0;

        if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
                return NETDEV_TX_BUSY;

        spin_lock_irqsave(&ieee->lock, flags);

        /* If there is no driver handler to take the TXB, don't bother
         * creating it... */
        if (!ieee->hard_start_xmit) {
                printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
                goto success;
        }

        if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
                printk(KERN_WARNING "%s: skb too small (%d).\n",
                       ieee->dev->name, skb->len);
                goto success;
        }

        ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

        crypt = ieee->crypt[ieee->tx_keyidx];

        encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
            ieee->sec.encrypt;

        host_encrypt = ieee->host_encrypt && encrypt && crypt;
        host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
        host_build_iv = ieee->host_build_iv && encrypt && crypt;

        if (!encrypt && ieee->ieee802_1x &&
            ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
                stats->tx_dropped++;
                goto success;
        }

        /* Save source and destination addresses */
        skb_copy_from_linear_data(skb, dest, ETH_ALEN);
        skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

        if (host_encrypt || host_build_iv)
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                    IEEE80211_FCTL_PROTECTED;
        else
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

        if (ieee->iw_mode == IW_MODE_INFRA) {
                fc |= IEEE80211_FCTL_TODS;
                /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
                memcpy(header.addr1, ieee->bssid, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, dest, ETH_ALEN);
        } else if (ieee->iw_mode == IW_MODE_ADHOC) {
                /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
                memcpy(header.addr1, dest, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, ieee->bssid, ETH_ALEN);
        }
        hdr_len = IEEE80211_3ADDR_LEN;

        if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
                fc |= IEEE80211_STYPE_QOS_DATA;
                hdr_len += 2;

                skb->priority = ieee80211_classify(skb);
                header.qos_ctl |= cpu_to_le16(skb->priority & IEEE80211_QCTL_TID);
        }
        header.frame_ctl = cpu_to_le16(fc);

        /* Advance the SKB to the start of the payload */
        skb_pull(skb, sizeof(struct ethhdr));

        /* Determine total amount of storage required for TXB packets */
        bytes = skb->len + SNAP_SIZE + sizeof(u16);

        /* Encrypt msdu first on the whole data packet. */
        if ((host_encrypt || host_encrypt_msdu) &&
            crypt && crypt->ops && crypt->ops->encrypt_msdu) {
                int res = 0;
                int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
                struct sk_buff *skb_new = dev_alloc_skb(len);

                if (unlikely(!skb_new))
                        goto failed;

                skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
                memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
                snapped = 1;
                ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
                                    ether_type);
                skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
                res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
                if (res < 0) {
                        IEEE80211_ERROR("msdu encryption failed\n");
                        dev_kfree_skb_any(skb_new);
                        goto failed;
                }
                dev_kfree_skb_any(skb);
                skb = skb_new;
                bytes += crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
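                /* The 802.11 header copied into skb_new above has done its
                 * job for MSDU encryption; pull it back off so that the
                 * fragmentation code below can prepend a fresh header to
                 * every fragment. */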
                skb_pull(skb, hdr_len);
        }

        if (host_encrypt || ieee->host_open_frag) {
                /* Determine fragmentation size based on destination (multicast
                 * and broadcast are not fragmented) */
                if (is_multicast_ether_addr(dest) ||
                    is_broadcast_ether_addr(dest))
                        frag_size = MAX_FRAG_THRESHOLD;
                else
                        frag_size = ieee->fts;

                /* Determine the amount of payload per fragment.  Regardless
                 * of whether this stack is providing the full 802.11 header,
                 * one will eventually be affixed to this fragment -- so we
                 * must account for it when determining the amount of payload
                 * space. */
                bytes_per_frag = frag_size - hdr_len;
                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        bytes_per_frag -= IEEE80211_FCS_LEN;

                /* Each fragment may need to have room for encryption
                 * pre/postfix */
                if (host_encrypt)
                        bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
                            crypt->ops->extra_mpdu_postfix_len;

                /* Number of fragments is the total payload size divided by
                 * the payload carried in each fragment */
                nr_frags = bytes / bytes_per_frag;
                bytes_last_frag = bytes % bytes_per_frag;
                if (bytes_last_frag)
                        nr_frags++;
                else
                        bytes_last_frag = bytes_per_frag;
        } else {
                nr_frags = 1;
                bytes_per_frag = bytes_last_frag = bytes;
                frag_size = bytes + hdr_len;
        }
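        /* Illustrative numbers: with bytes = 1508 (1500 bytes of payload plus
         * the 8 byte SNAP header) and bytes_per_frag = 492, the division
         * gives nr_frags = 3 and bytes_last_frag = 32, and the non-zero
         * remainder bumps nr_frags to 4. */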

        rts_required = (frag_size > ieee->rts
                        && ieee->config & CFG_IEEE80211_RTS);
        if (rts_required)
                nr_frags++;

        /* When we allocate the TXB we allocate enough space for the reserve
         * and full fragment bytes (bytes_per_frag doesn't include prefix,
         * postfix, header, FCS, etc.) */
        txb = ieee80211_alloc_txb(nr_frags, frag_size,
                                  ieee->tx_headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }
        txb->encrypted = encrypt;
        if (host_encrypt)
                txb->payload_size = frag_size * (nr_frags - 1) +
                    bytes_last_frag;
        else
                txb->payload_size = bytes;

        if (rts_required) {
                skb_frag = txb->fragments[0];
                frag_hdr =
                    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);

                /*
                 * Set header frame_ctl to the RTS.
                 */
                header.frame_ctl =
                    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
                memcpy(frag_hdr, &header, hdr_len);

                /*
                 * Restore header frame_ctl to the original data setting.
                 */
                header.frame_ctl = cpu_to_le16(fc);

                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        skb_put(skb_frag, 4);

                txb->rts_included = 1;
                i = 1;
        } else
                i = 0;

        for (; i < nr_frags; i++) {
                skb_frag = txb->fragments[i];

                if (host_encrypt || host_build_iv)
                        skb_reserve(skb_frag,
                                    crypt->ops->extra_mpdu_prefix_len);

                frag_hdr =
                    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
                memcpy(frag_hdr, &header, hdr_len);

                /* If this is not the last fragment, then add the MOREFRAGS
                 * bit to the frame control */
                if (i != nr_frags - 1) {
                        frag_hdr->frame_ctl =
                            cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
                        bytes = bytes_per_frag;
                } else {
                        /* The last fragment takes the remaining length */
                        bytes = bytes_last_frag;
                }

                if (i == 0 && !snapped) {
                        ieee80211_copy_snap(skb_put
                                            (skb_frag, SNAP_SIZE + sizeof(u16)),
                                            ether_type);
                        bytes -= SNAP_SIZE + sizeof(u16);
                }

                skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

                /* Advance the SKB... */
                skb_pull(skb, bytes);

                /* Encryption routine will move the header forward in order
                 * to insert the IV between the header and the payload */
                if (host_encrypt)
                        ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
                else if (host_build_iv) {
                        atomic_inc(&crypt->refcnt);
                        if (crypt->ops->build_iv)
                                crypt->ops->build_iv(skb_frag, hdr_len,
                                    ieee->sec.keys[ieee->sec.active_key],
                                    ieee->sec.key_sizes[ieee->sec.active_key],
                                    crypt->priv);
                        atomic_dec(&crypt->refcnt);
                }

                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        skb_put(skb_frag, 4);
        }

      success:
        spin_unlock_irqrestore(&ieee->lock, flags);

        dev_kfree_skb_any(skb);

        if (txb) {
                int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
                if (ret == 0) {
                        stats->tx_packets++;
                        stats->tx_bytes += txb->payload_size;
                        return 0;
                }

                ieee80211_txb_free(txb);
        }

        return 0;

      failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        netif_stop_queue(dev);
        stats->tx_errors++;
        return 1;
}

/* Incoming 802.11 structure is converted to a TXB,
 * a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_tx_frame(struct ieee80211_device *ieee,
                       struct ieee80211_hdr *frame, int hdr_len, int total_len,
                       int encrypt_mpdu)
{
        struct ieee80211_txb *txb = NULL;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
        struct sk_buff *skb_frag;
        int priority = -1;
        int fraglen = total_len;
        int headroom = ieee->tx_headroom;
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];

        spin_lock_irqsave(&ieee->lock, flags);

        if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
                encrypt_mpdu = 0;

        /* If there is no driver handler to take the TXB, don't bother
         * creating it... */
        if (!ieee->hard_start_xmit) {
                printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
                goto success;
        }

        if (unlikely(total_len < 24)) {
                printk(KERN_WARNING "%s: skb too small (%d).\n",
                       ieee->dev->name, total_len);
                goto success;
        }

        if (encrypt_mpdu) {
                frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
                fraglen += crypt->ops->extra_mpdu_prefix_len +
                           crypt->ops->extra_mpdu_postfix_len;
                headroom += crypt->ops->extra_mpdu_prefix_len;
        }

        /* When we allocate the TXB we allocate enough space for the reserve
         * and full fragment bytes (bytes_per_frag doesn't include prefix,
         * postfix, header, FCS, etc.) */
        txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }
        txb->encrypted = 0;
        txb->payload_size = fraglen;

        skb_frag = txb->fragments[0];

        memcpy(skb_put(skb_frag, total_len), frame, total_len);

        if (ieee->config &
            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                skb_put(skb_frag, 4);

        /* To avoid overcomplicating things, we do the corner-case frame
         * encryption in software. The only real situation where encryption is
         * needed here is during software-based shared key authentication. */
        if (encrypt_mpdu)
                ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);

      success:
        spin_unlock_irqrestore(&ieee->lock, flags);

        if (txb) {
                if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
                        stats->tx_packets++;
                        stats->tx_bytes += txb->payload_size;
                        return 0;
                }
                ieee80211_txb_free(txb);
        }
        return 0;

      failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        stats->tx_errors++;
        return 1;
}

EXPORT_SYMBOL(ieee80211_tx_frame);
EXPORT_SYMBOL(ieee80211_txb_free);