mac80211: rework debug settings and make debugging safer
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / mac80211 / wme.c
CommitLineData
f0706e82
JB
1/*
2 * Copyright 2004, Instant802 Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11#include <linux/module.h>
12#include <linux/if_arp.h>
13#include <linux/types.h>
14#include <net/ip.h>
15#include <net/pkt_sched.h>
16
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup.
 *
 * Indexed by 802.1d user priority (0-7); the value is the WMM access
 * category queue index (0 = highest-priority hw queue here).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
f0706e82
JB
31
/*
 * Private state of the mac80211 "ieee80211" root qdisc.
 * Holds one child qdisc plus one requeue list per hardware queue,
 * and a bitmap tracking which hardware queues are currently in use.
 */
struct ieee80211_sched_data
{
	/* one bit per hw queue; set = queue reserved (regular or AMPDU) */
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	/* user-installed tc classifier chain (may be NULL) */
	struct tcf_proto *filter_list;
	/* child qdisc per hardware queue */
	struct Qdisc *queues[QD_MAX_QUEUES];
	/* frames handed back by the driver, dequeued before the child qdisc */
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};
39
/* LLC/SNAP header announcing an IPv4 payload (EtherType 0x0800) */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
f0706e82
JB
41
/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present: the frame must be long
	 * enough for 802.11 hdr + LLC/SNAP + IP hdr and carry the IPv4
	 * LLC/SNAP signature; otherwise fall back to best effort (tag 0) */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	/* map the IP precedence (top 3 bits of TOS) to the 802.1d tag,
	 * but only when none of TOS bits 2-4 are set */
	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}
79
80
81static inline int wme_downgrade_ac(struct sk_buff *skb)
82{
83 switch (skb->priority) {
84 case 6:
85 case 7:
86 skb->priority = 5; /* VO -> VI */
87 return 0;
88 case 4:
89 case 5:
90 skb->priority = 3; /* VI -> BE */
91 return 0;
92 case 0:
93 case 3:
94 skb->priority = 2; /* BE -> BK */
95 return 0;
96 default:
97 return -1;
98 }
99}
100
101
/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	/* placeholder: injected frames would take their AC from radiotap;
	 * this branch is intentionally dead (condition is constant 0) */
	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac;
	 * keep downgrading until an AC without admission control is found */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
139
140
/*
 * Root qdisc enqueue. Two paths:
 *  - driver-requeued frames (IEEE80211_TX_CTL_REQUEUE) keep their queue
 *    mapping (possibly redirected to an active AMPDU queue) and go on the
 *    per-queue requeued list, bypassing classification;
 *  - fresh frames are classified, get their QoS control field filled in,
 *    may be redirected to an AMPDU queue, and are handed to the child qdisc.
 * Returns NET_XMIT_SUCCESS or the child's error / NET_XMIT_DROP.
 */
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err, queue;
	u8 tid;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		queue = skb_get_queue_mapping(skb);
		/* sta_info lookup is RCU-protected */
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			/* redirect to the aggregation queue only if it is
			 * in range and still marked used in the pool */
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* clamp to the last regular hw queue (negative "drop" values
	 * are handled further down) */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();

		/* same AMPDU-queue redirection as on the requeue path */
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue < 0)) {
		/* classify80211() asked for a drop */
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		skb_set_queue_mapping(skb, queue);
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}
227
228
229/* TODO: clean up the cases where master_hard_start_xmit
230 * returns non 0 - it shouldn't ever do that. Once done we
231 * can remove this function */
232static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
233{
234 struct ieee80211_sched_data *q = qdisc_priv(qd);
f0706e82
JB
235 struct Qdisc *qdisc;
236 int err;
237
238 /* we recorded which queue to use earlier! */
e2530083 239 qdisc = q->queues[skb_get_queue_mapping(skb)];
f0706e82
JB
240
241 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
242 qd->q.qlen++;
243 return 0;
244 }
245 qd->qstats.drops++;
246 return err;
247}
248
249
/*
 * Root qdisc dequeue: scan hardware queues in priority order (lowest
 * index first), skipping stopped or unreserved queues. Driver-requeued
 * frames for a queue are served before that queue's child qdisc.
 */
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if (__netif_subqueue_stopped(local->mdev, queue) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}
285
286
287static void wme_qdiscop_reset(struct Qdisc* qd)
288{
289 struct ieee80211_sched_data *q = qdisc_priv(qd);
290 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
291 struct ieee80211_hw *hw = &local->hw;
292 int queue;
293
294 /* QUESTION: should we have some hardware flush functionality here? */
295
e100bb64 296 for (queue = 0; queue < QD_NUM(hw); queue++) {
f0706e82
JB
297 skb_queue_purge(&q->requeued[queue]);
298 qdisc_reset(q->queues[queue]);
299 }
300 qd->q.qlen = 0;
301}
302
303
304static void wme_qdiscop_destroy(struct Qdisc* qd)
305{
306 struct ieee80211_sched_data *q = qdisc_priv(qd);
307 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
308 struct ieee80211_hw *hw = &local->hw;
309 int queue;
310
311 tcf_destroy_chain(q->filter_list);
312 q->filter_list = NULL;
313
e100bb64 314 for (queue = 0; queue < QD_NUM(hw); queue++) {
f0706e82
JB
315 skb_queue_purge(&q->requeued[queue]);
316 qdisc_destroy(q->queues[queue]);
317 q->queues[queue] = &noop_qdisc;
318 }
319}
320
321
/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	/* no runtime-tunable options implemented; accept and ignore */
	return 0;
}
327
328
/* called during initial creation of qdisc on device.
 * Only valid as the root qdisc of the mac80211 master device; creates a
 * default pfifo child per hardware queue and marks the regular
 * (non-aggregation) queues as in use. */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			/* fall back to noop_qdisc so later dequeue/reset/
			 * destroy stay safe; failure is logged, not fatal */
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}
379
/* dumping qdisc parameters to netlink is not supported; -1 signals
 * the tc core that no options can be reported */
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}
384
385
/*
 * Replace the child qdisc of class `arg` (1-based) with `new`,
 * returning the previous child through `old` for the caller to dispose
 * of. A NULL `new` installs noop_qdisc. The swap happens under
 * sch_tree_lock and the old child is reset before being handed back.
 */
static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;	/* class ids are 1-based */

	if (queue >= QD_NUM(hw))
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}
408
409
410static struct Qdisc *
411wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
412{
413 struct ieee80211_sched_data *q = qdisc_priv(qd);
414 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
415 struct ieee80211_hw *hw = &local->hw;
416 unsigned long queue = arg - 1;
417
e100bb64 418 if (queue >= QD_NUM(hw))
f0706e82
JB
419 return NULL;
420
421 return q->queues[queue];
422}
423
424
425static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
426{
427 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
428 struct ieee80211_hw *hw = &local->hw;
429 unsigned long queue = TC_H_MIN(classid);
430
e100bb64 431 if (queue - 1 >= QD_NUM(hw))
f0706e82
JB
432 return 0;
433
434 return queue;
435}
436
437
/* binding a filter to a class is the same lookup as get */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
443
444
/* classes are static (one per hw queue), so there is no refcount to drop */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
448
449
450static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
1e90474c 451 struct nlattr **tca, unsigned long *arg)
f0706e82
JB
452{
453 unsigned long cl = *arg;
454 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
455 struct ieee80211_hw *hw = &local->hw;
456
e100bb64 457 if (cl - 1 > QD_NUM(hw))
f0706e82
JB
458 return -ENOENT;
459
460 /* TODO: put code to program hardware queue parameters here,
461 * to allow programming from tc command line */
462
463 return 0;
464}
465
466
467/* we don't support deleting hardware queues
468 * when we add WMM-SA support - TSPECs may be deleted here */
469static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
470{
471 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
472 struct ieee80211_hw *hw = &local->hw;
473
e100bb64 474 if (cl - 1 > QD_NUM(hw))
f0706e82
JB
475 return -ENOENT;
476 return 0;
477}
478
479
480static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
481 struct sk_buff *skb, struct tcmsg *tcm)
482{
483 struct ieee80211_sched_data *q = qdisc_priv(qd);
484 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
485 struct ieee80211_hw *hw = &local->hw;
486
e100bb64 487 if (cl - 1 > QD_NUM(hw))
f0706e82
JB
488 return -ENOENT;
489 tcm->tcm_handle = TC_H_MIN(cl);
490 tcm->tcm_parent = qd->handle;
491 tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
492 return 0;
493}
494
495
496static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
497{
498 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
499 struct ieee80211_hw *hw = &local->hw;
500 int queue;
501
502 if (arg->stop)
503 return;
504
e100bb64 505 for (queue = 0; queue < QD_NUM(hw); queue++) {
f0706e82
JB
506 if (arg->count < arg->skip) {
507 arg->count++;
508 continue;
509 }
510 /* we should return classids for our internal queues here
511 * as well as the external ones */
512 if (arg->fn(qd, queue+1, arg) < 0) {
513 arg->stop = 1;
514 break;
515 }
516 arg->count++;
517 }
518}
519
520
521static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
522 unsigned long cl)
523{
524 struct ieee80211_sched_data *q = qdisc_priv(qd);
525
526 if (cl)
527 return NULL;
528
529 return &q->filter_list;
530}
531
532
/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
 * - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};
552
553
/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};
574
575
/*
 * Install the mac80211 WME qdisc as the device's sleeping root qdisc.
 * On allocation failure this logs an error and leaves the device's
 * existing qdisc in place.
 */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
594
595
/* true iff the device's sleeping root qdisc is our WME qdisc */
int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}
600
601
/* register the "ieee80211" qdisc with the tc core; returns 0 or -errno */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
606
607
/* counterpart of ieee80211_wme_register() */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
9e723492
RR
612
/*
 * Reserve an aggregation (AMPDU) hardware queue for `sta`/`tid`.
 * Scans the pool above the regular queues, atomically claims the first
 * free one, stops it and records it in sta->tid_to_tx_q[tid].
 * Returns 0 on success, -EPERM if the hw has no AMPDU queues,
 * -EAGAIN when all AMPDU queues are taken.
 */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			/* queue starts stopped; packets drain from the old
			 * queue first */
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
649
/**
 * Release the AMPDU queue assigned to sta/tid back to the pool.
 * When `requeue` is set, pending frames are pushed back through the root
 * qdisc for re-classification; otherwise the child qdisc is reset
 * (pending frames discarded).
 *
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool; QD_NUM(hw) is the out-of-range
	 * sentinel meaning "no AMPDU queue assigned" */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
671
672void ieee80211_requeue(struct ieee80211_local *local, int queue)
673{
674 struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
675 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
676 struct Qdisc *qdisc = q->queues[queue];
677 struct sk_buff *skb = NULL;
0da926f0 678 u32 len;
9e723492
RR
679
680 if (!qdisc || !qdisc->dequeue)
681 return;
682
9e723492
RR
683 for (len = qdisc->q.qlen; len > 0; len--) {
684 skb = qdisc->dequeue(qdisc);
685 root_qd->q.qlen--;
686 /* packet will be classified again and */
687 /* skb->packet_data->queue will be overridden if needed */
688 if (skb)
689 wme_qdiscop_enqueue(skb, root_qd);
690 }
691}