drivers/net/wireless/b43/pio.c
/*

  Broadcom B43 wireless driver

  PIO data transfer

  Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "pio.h"
#include "dma.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>


static u16 generate_cookie(struct b43_pio_txqueue *q,
			   struct b43_pio_txpacket *pack)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * PIO controller ID and store the packet index number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
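	/* Example: queue index 2 (AC_VI) and packet slot 5 give the cookie
	 * ((2 + 1) << 12) | 5 == 0x3005. */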
	cookie = (((u16)q->index + 1) << 12);
	cookie |= pack->index;

	return cookie;
}

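/* Reverse of generate_cookie(): map a TX status cookie back to the PIO
 * queue and the packet slot it refers to. */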
static
struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
				     u16 cookie,
				     struct b43_pio_txpacket **pack)
{
	struct b43_pio *pio = &dev->pio;
	struct b43_pio_txqueue *q = NULL;
	unsigned int pack_index;

	switch (cookie & 0xF000) {
	case 0x1000:
		q = pio->tx_queue_AC_BK;
		break;
	case 0x2000:
		q = pio->tx_queue_AC_BE;
		break;
	case 0x3000:
		q = pio->tx_queue_AC_VI;
		break;
	case 0x4000:
		q = pio->tx_queue_AC_VO;
		break;
	case 0x5000:
		q = pio->tx_queue_mcast;
		break;
	}
	if (B43_WARN_ON(!q))
		return NULL;
	pack_index = (cookie & 0x0FFF);
	if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
		return NULL;
	*pack = &q->packets[pack_index];

	return q;
}

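/* Map a PIO engine index to its MMIO register base. Core revisions >= 11
 * use a different register layout with fewer PIO engines. */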
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
				  unsigned int index)
{
	static const u16 bases[] = {
		B43_MMIO_PIO_BASE0,
		B43_MMIO_PIO_BASE1,
		B43_MMIO_PIO_BASE2,
		B43_MMIO_PIO_BASE3,
		B43_MMIO_PIO_BASE4,
		B43_MMIO_PIO_BASE5,
		B43_MMIO_PIO_BASE6,
		B43_MMIO_PIO_BASE7,
	};
	static const u16 bases_rev11[] = {
		B43_MMIO_PIO11_BASE0,
		B43_MMIO_PIO11_BASE1,
		B43_MMIO_PIO11_BASE2,
		B43_MMIO_PIO11_BASE3,
		B43_MMIO_PIO11_BASE4,
		B43_MMIO_PIO11_BASE5,
	};

	if (dev->sdev->id.revision >= 11) {
		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
		return bases_rev11[index];
	}
	B43_WARN_ON(index >= ARRAY_SIZE(bases));
	return bases[index];
}

static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
	if (dev->sdev->id.revision >= 11)
		return 0x18;
	return 0;
}

static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
	if (dev->sdev->id.revision >= 11)
		return 0x38;
	return 8;
}

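/* Allocate and set up one TX queue. On core rev >= 8 the usable buffer size
 * is a fixed constant; older cores report it in the TXQBUFSIZE register. */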
static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *p;
	unsigned int i;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->sdev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_txqueue_offset(dev);
	q->index = index;

	q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
	if (q->rev >= 8) {
		q->buffer_size = 1920; //FIXME this constant is wrong.
	} else {
		q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
		q->buffer_size -= 80;
	}

	INIT_LIST_HEAD(&q->packets_list);
	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		p = &(q->packets[i]);
		INIT_LIST_HEAD(&p->list);
		p->index = i;
		p->queue = q;
		list_add(&p->list, &q->packets_list);
	}

	return q;
}

static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_rxqueue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->sdev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_rxqueue_offset(dev);

	/* Enable Direct FIFO RX (PIO) on the engine. */
	b43_dma_direct_fifo_rx(dev, index, 1);

	return q;
}

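/* Free any skbs still attached to the queue's packet slots. */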
static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
{
	struct b43_pio_txpacket *pack;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		pack = &(q->packets[i]);
		if (pack->skb) {
			dev_kfree_skb_any(pack->skb);
			pack->skb = NULL;
		}
	}
}

static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}

static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}

#define destroy_queue_tx(pio, queue) do {				\
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

#define destroy_queue_rx(pio, queue) do {				\
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

void b43_pio_free(struct b43_wldev *dev)
{
	struct b43_pio *pio;

	if (!b43_using_pio_transfers(dev))
		return;
	pio = &dev->pio;

	destroy_queue_rx(pio, rx_queue);
	destroy_queue_tx(pio, tx_queue_mcast);
	destroy_queue_tx(pio, tx_queue_AC_VO);
	destroy_queue_tx(pio, tx_queue_AC_VI);
	destroy_queue_tx(pio, tx_queue_AC_BE);
	destroy_queue_tx(pio, tx_queue_AC_BK);
}

int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}

/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
							u8 queue_prio)
{
	struct b43_pio_txqueue *q;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			q = dev->pio.tx_queue_AC_VO;
			break;
		case 1:
			q = dev->pio.tx_queue_AC_VI;
			break;
		case 2:
			q = dev->pio.tx_queue_AC_BE;
			break;
		case 3:
			q = dev->pio.tx_queue_AC_BK;
			break;
		}
	} else
		q = dev->pio.tx_queue_AC_BE;

	return q;
}

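/* Push a block of data into a 16-bit (core rev < 8) TX FIFO. An odd trailing
 * byte is written through the tailspace buffer with the WRITEHI byte-enable
 * cleared, so only the valid low byte of the final halfword is used. */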
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
				u16 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

	b43_block_write(dev, data, (data_len & ~1),
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));
	if (data_len & 1) {
		u8 *tail = wl->pio_tailspace;
		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

		/* Write the last byte. */
		ctl &= ~B43_PIO_TXCTL_WRITEHI;
		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
		tail[0] = data[data_len - 1];
		tail[1] = 0;
		b43_block_write(dev, tail, 2,
				q->mmio_base + B43_PIO_TXDATA,
				sizeof(u16));
	}

	return ctl;
}

static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u16 ctl;

	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
	ctl |= B43_PIO_TXCTL_FREADY;
	ctl &= ~B43_PIO_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO_TXCTL_EOF;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}

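/* Push a block of data into a 32-bit (core rev >= 8) TX FIFO. Up to three
 * trailing bytes go through the tailspace buffer, with the TXCTL byte-enable
 * bits reduced to cover only the valid bytes of the final word. */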
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
				u32 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
	       B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
	b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

	b43_block_write(dev, data, (data_len & ~3),
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));
	if (data_len & 3) {
		u8 *tail = wl->pio_tailspace;
		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

		memset(tail, 0, 4);
		/* Write the last few bytes. */
		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
		switch (data_len & 3) {
		case 3:
			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
			tail[0] = data[data_len - 3];
			tail[1] = data[data_len - 2];
			tail[2] = data[data_len - 1];
			break;
		case 2:
			ctl |= B43_PIO8_TXCTL_8_15;
			tail[0] = data[data_len - 2];
			tail[1] = data[data_len - 1];
			break;
		case 1:
			tail[0] = data[data_len - 1];
			break;
		}
		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
		b43_block_write(dev, tail, 4,
				q->mmio_base + B43_PIO8_TXDATA,
				sizeof(u32));
	}

	return ctl;
}

static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u32 ctl;

	ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
	ctl |= B43_PIO8_TXCTL_FREADY;
	ctl &= ~B43_PIO8_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO8_TXCTL_EOF;
	b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
}

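/* Build the device TX header in the scratchspace buffer, then push header and
 * frame data into the queue FIFO. One packet slot is consumed until the TX
 * status report carrying this cookie comes back. */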
static int pio_tx_frame(struct b43_pio_txqueue *q,
			struct sk_buff *skb)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	struct b43_pio_txpacket *pack;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next,
			  struct b43_pio_txpacket, list);

	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(dev);
	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
				 info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}

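/* mac80211 TX entry point for PIO: choose the target queue, check that it has
 * enough buffer space and a free packet slot, then hand the frame to
 * pio_tx_frame(). Stops the mac80211 queue when the PIO queue fills up. */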
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
	}

out:
	return err;
}

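/* Handle a TX status report: return the packet slot to the free list, report
 * the status to mac80211 and wake the queue if it was stopped for space. */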
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}

/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	memset(rxhdr, 0, sizeof(*rxhdr));

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		b43_block_read(dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		b43_block_read(dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	macstat = le32_to_cpu(rxhdr->mac_status);
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		b43_block_read(dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes. */
			b43_block_read(dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		b43_block_read(dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the last byte. */
			b43_block_read(dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

	return 1;
}

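/* Drain the RX FIFO: keep fetching frames until none is pending, with an
 * upper bound on iterations as a safety net against endless looping. */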
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	unsigned int count = 0;
	bool stop;

	while (1) {
		stop = (pio_rx_frame(q) == 0);
		if (stop)
			break;
		cond_resched();
		if (WARN_ON_ONCE(++count > 10000))
			break;
	}
}

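/* Request the hardware to suspend (or, below, resume) transmission on one
 * queue by toggling the SUSPREQ bit in the queue's TX control register. */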
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  | B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  | B43_PIO_TXCTL_SUSPREQ);
	}
}

static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  & ~B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  & ~B43_PIO_TXCTL_SUSPREQ);
	}
}

void b43_pio_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}

void b43_pio_tx_resume(struct b43_wldev *dev)
{
	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}