[Bluetooth] Switch from OGF+OCF to using only opcodes
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
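This commit changes the HCI command path to take a single 16-bit opcode instead of separate OGF (opcode group field) and OCF (opcode command field) arguments. In the Bluetooth specification the opcode carries the OGF in its upper 6 bits and the OCF in its lower 10 bits, so for example HCI_OP_RESET (OGF 0x03, OCF 0x0003) is 0x0c03. A rough before/after sketch of a call site, with the old two-argument form assumed from the pre-patch hci_send_cmd() prototype:

	/* old (assumed form): group and command fields passed separately */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* new: a single HCI_OP_* constant that already encodes OGF and OCF */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);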
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
27#include <linux/module.h>
28#include <linux/kmod.h>
29
30#include <linux/types.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/skbuff.h>
39#include <linux/interrupt.h>
40#include <linux/notifier.h>
41#include <net/sock.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/unaligned.h>
46
47#include <net/bluetooth/bluetooth.h>
48#include <net/bluetooth/hci_core.h>
49
50#ifndef CONFIG_BT_HCI_CORE_DEBUG
51#undef BT_DBG
52#define BT_DBG(D...)
53#endif
54
55static void hci_cmd_task(unsigned long arg);
56static void hci_rx_task(unsigned long arg);
57static void hci_tx_task(unsigned long arg);
58static void hci_notify(struct hci_dev *hdev, int event);
59
60static DEFINE_RWLOCK(hci_task_lock);
61
62/* HCI device list */
63LIST_HEAD(hci_dev_list);
64DEFINE_RWLOCK(hci_dev_list_lock);
65
66/* HCI callback list */
67LIST_HEAD(hci_cb_list);
68DEFINE_RWLOCK(hci_cb_list_lock);
69
70/* HCI protocols */
71#define HCI_MAX_PROTO 2
72struct hci_proto *hci_proto[HCI_MAX_PROTO];
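/* Slot HCI_PROTO_L2CAP holds the L2CAP layer and slot HCI_PROTO_SCO the SCO
 * layer; see the lookups in hci_acldata_packet() and hci_scodata_packet(). */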
73
74/* HCI notifiers list */
75static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77/* ---- HCI notifications ---- */
78
79int hci_register_notifier(struct notifier_block *nb)
80{
81 return atomic_notifier_chain_register(&hci_notifier, nb);
82}
83
84int hci_unregister_notifier(struct notifier_block *nb)
85{
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87}
88
89static void hci_notify(struct hci_dev *hdev, int event)
90{
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92}
93
94/* ---- HCI requests ---- */
95
96void hci_req_complete(struct hci_dev *hdev, int result)
97{
98 BT_DBG("%s result 0x%2.2x", hdev->name, result);
99
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = result;
102 hdev->req_status = HCI_REQ_DONE;
103 wake_up_interruptible(&hdev->req_wait_q);
104 }
105}
106
107static void hci_req_cancel(struct hci_dev *hdev, int err)
108{
109 BT_DBG("%s err 0x%2.2x", hdev->name, err);
110
111 if (hdev->req_status == HCI_REQ_PEND) {
112 hdev->req_result = err;
113 hdev->req_status = HCI_REQ_CANCELED;
114 wake_up_interruptible(&hdev->req_wait_q);
115 }
116}
117
118/* Execute request and wait for completion. */
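/* The req callback queues one or more HCI commands; when the last of them
 * completes, the event handling code calls hci_req_complete() to set
 * HCI_REQ_DONE and wake the waiter, while hci_req_cancel() aborts a pending
 * request with an error instead. */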
119static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
120 unsigned long opt, __u32 timeout)
121{
122 DECLARE_WAITQUEUE(wait, current);
123 int err = 0;
124
125 BT_DBG("%s start", hdev->name);
126
127 hdev->req_status = HCI_REQ_PEND;
128
129 add_wait_queue(&hdev->req_wait_q, &wait);
130 set_current_state(TASK_INTERRUPTIBLE);
131
132 req(hdev, opt);
133 schedule_timeout(timeout);
134
135 remove_wait_queue(&hdev->req_wait_q, &wait);
136
137 if (signal_pending(current))
138 return -EINTR;
139
140 switch (hdev->req_status) {
141 case HCI_REQ_DONE:
142 err = -bt_err(hdev->req_result);
143 break;
144
145 case HCI_REQ_CANCELED:
146 err = -hdev->req_result;
147 break;
148
149 default:
150 err = -ETIMEDOUT;
151 break;
152 }
153
154 hdev->req_status = hdev->req_result = 0;
155
156 BT_DBG("%s end: err %d", hdev->name, err);
157
158 return err;
159}
160
161static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
162 unsigned long opt, __u32 timeout)
163{
164 int ret;
165
166 /* Serialize all requests */
167 hci_req_lock(hdev);
168 ret = __hci_request(hdev, req, opt, timeout);
169 hci_req_unlock(hdev);
170
171 return ret;
172}
173
174static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175{
176 BT_DBG("%s %ld", hdev->name, opt);
177
178 /* Reset device */
179 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
180}
181
182static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
183{
184 struct sk_buff *skb;
185 __le16 param;
186 __u8 flt_type;
187
188 BT_DBG("%s %ld", hdev->name, opt);
189
190 /* Driver initialization */
191
192 /* Special commands */
193 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 skb->dev = (void *) hdev;
196 skb_queue_tail(&hdev->cmd_q, skb);
197 hci_sched_cmd(hdev);
198 }
199 skb_queue_purge(&hdev->driver_init);
200
201 /* Mandatory initialization */
202
203 /* Reset */
204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206
207 /* Read Local Supported Features */
208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210 /* Read Local Version */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215
216#if 0
217 /* Host buffer size */
218 {
219 struct hci_cp_host_buffer_size cp;
220 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
221 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 cp.acl_max_pkt = cpu_to_le16(0xffff);
223 cp.sco_max_pkt = cpu_to_le16(0xffff);
224 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
225 }
226#endif
227
228 /* Read BD Address */
229 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230
231 /* Read Class of Device */
232 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233
234 /* Read Local Name */
235 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
236
237 /* Read Voice Setting */
238 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
239
240 /* Optional initialization */
241
242 /* Clear Event Filters */
243 flt_type = HCI_FLT_CLEAR_ALL;
244 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
245
246 /* Page timeout ~20 secs */
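	/* Both timeout values below are in 0.625 ms baseband slots:
	 * 0x8000 * 0.625 ms ~= 20.5 s and 0x7d00 * 0.625 ms = 20 s. */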
247 param = cpu_to_le16(0x8000);
248 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
249
250 /* Connection accept timeout ~20 secs */
251 param = cpu_to_le16(0x7d00);
252 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
253}
254
255static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
256{
257 __u8 scan = opt;
258
259 BT_DBG("%s %x", hdev->name, scan);
260
261 /* Inquiry and Page scans */
262 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
263}
264
265static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
266{
267 __u8 auth = opt;
268
269 BT_DBG("%s %x", hdev->name, auth);
270
271 /* Authentication */
272 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
273}
274
275static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
276{
277 __u8 encrypt = opt;
278
279 BT_DBG("%s %x", hdev->name, encrypt);
280
281 /* Encryption */
282 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
283}
284
285/* Get HCI device by index.
286 * Device is held on return. */
287struct hci_dev *hci_dev_get(int index)
288{
289 struct hci_dev *hdev = NULL;
290 struct list_head *p;
291
292 BT_DBG("%d", index);
293
294 if (index < 0)
295 return NULL;
296
297 read_lock(&hci_dev_list_lock);
298 list_for_each(p, &hci_dev_list) {
299 struct hci_dev *d = list_entry(p, struct hci_dev, list);
300 if (d->id == index) {
301 hdev = hci_dev_hold(d);
302 break;
303 }
304 }
305 read_unlock(&hci_dev_list_lock);
306 return hdev;
307}
308
309/* ---- Inquiry support ---- */
310static void inquiry_cache_flush(struct hci_dev *hdev)
311{
312 struct inquiry_cache *cache = &hdev->inq_cache;
313 struct inquiry_entry *next = cache->list, *e;
314
315 BT_DBG("cache %p", cache);
316
317 cache->list = NULL;
318 while ((e = next)) {
319 next = e->next;
320 kfree(e);
321 }
322}
323
324struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
325{
326 struct inquiry_cache *cache = &hdev->inq_cache;
327 struct inquiry_entry *e;
328
329 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
330
331 for (e = cache->list; e; e = e->next)
332 if (!bacmp(&e->data.bdaddr, bdaddr))
333 break;
334 return e;
335}
336
337void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
338{
339 struct inquiry_cache *cache = &hdev->inq_cache;
340 struct inquiry_entry *e;
341
342 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
343
344 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
345 /* Entry not in the cache. Add new one. */
346 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
347 return;
348 e->next = cache->list;
349 cache->list = e;
350 }
351
352 memcpy(&e->data, data, sizeof(*data));
353 e->timestamp = jiffies;
354 cache->timestamp = jiffies;
355}
356
357static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
358{
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_info *info = (struct inquiry_info *) buf;
361 struct inquiry_entry *e;
362 int copied = 0;
363
364 for (e = cache->list; e && copied < num; e = e->next, copied++) {
365 struct inquiry_data *data = &e->data;
366 bacpy(&info->bdaddr, &data->bdaddr);
367 info->pscan_rep_mode = data->pscan_rep_mode;
368 info->pscan_period_mode = data->pscan_period_mode;
369 info->pscan_mode = data->pscan_mode;
370 memcpy(info->dev_class, data->dev_class, 3);
371 info->clock_offset = data->clock_offset;
372 info++;
373 }
374
375 BT_DBG("cache %p, copied %d", cache, copied);
376 return copied;
377}
378
379static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
380{
381 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
382 struct hci_cp_inquiry cp;
383
384 BT_DBG("%s", hdev->name);
385
386 if (test_bit(HCI_INQUIRY, &hdev->flags))
387 return;
388
389 /* Start Inquiry */
390 memcpy(&cp.lap, &ir->lap, 3);
391 cp.length = ir->length;
392 cp.num_rsp = ir->num_rsp;
393 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
394}
395
396int hci_inquiry(void __user *arg)
397{
398 __u8 __user *ptr = arg;
399 struct hci_inquiry_req ir;
400 struct hci_dev *hdev;
401 int err = 0, do_inquiry = 0, max_rsp;
402 long timeo;
403 __u8 *buf;
404
405 if (copy_from_user(&ir, ptr, sizeof(ir)))
406 return -EFAULT;
407
408 if (!(hdev = hci_dev_get(ir.dev_id)))
409 return -ENODEV;
410
411 hci_dev_lock_bh(hdev);
412 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
413 inquiry_cache_empty(hdev) ||
414 ir.flags & IREQ_CACHE_FLUSH) {
415 inquiry_cache_flush(hdev);
416 do_inquiry = 1;
417 }
418 hci_dev_unlock_bh(hdev);
419
420 timeo = ir.length * msecs_to_jiffies(2000);
421 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
422 goto done;
423
424 /* for an unlimited number of responses we will use a buffer with 255 entries */
425 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
426
427 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
428 * copy it to the user space.
429 */
430 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
431 err = -ENOMEM;
432 goto done;
433 }
434
435 hci_dev_lock_bh(hdev);
436 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
437 hci_dev_unlock_bh(hdev);
438
439 BT_DBG("num_rsp %d", ir.num_rsp);
440
441 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
442 ptr += sizeof(ir);
443 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
444 ir.num_rsp))
445 err = -EFAULT;
446 } else
447 err = -EFAULT;
448
449 kfree(buf);
450
451done:
452 hci_dev_put(hdev);
453 return err;
454}
455
456/* ---- HCI ioctl helpers ---- */
457
458int hci_dev_open(__u16 dev)
459{
460 struct hci_dev *hdev;
461 int ret = 0;
462
463 if (!(hdev = hci_dev_get(dev)))
464 return -ENODEV;
465
466 BT_DBG("%s %p", hdev->name, hdev);
467
468 hci_req_lock(hdev);
469
470 if (test_bit(HCI_UP, &hdev->flags)) {
471 ret = -EALREADY;
472 goto done;
473 }
474
475 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
476 set_bit(HCI_RAW, &hdev->flags);
477
478 if (hdev->open(hdev)) {
479 ret = -EIO;
480 goto done;
481 }
482
483 if (!test_bit(HCI_RAW, &hdev->flags)) {
484 atomic_set(&hdev->cmd_cnt, 1);
485 set_bit(HCI_INIT, &hdev->flags);
486
487 //__hci_request(hdev, hci_reset_req, 0, HZ);
488 ret = __hci_request(hdev, hci_init_req, 0,
489 msecs_to_jiffies(HCI_INIT_TIMEOUT));
490
491 clear_bit(HCI_INIT, &hdev->flags);
492 }
493
494 if (!ret) {
495 hci_dev_hold(hdev);
496 set_bit(HCI_UP, &hdev->flags);
497 hci_notify(hdev, HCI_DEV_UP);
498 } else {
499 /* Init failed, cleanup */
500 tasklet_kill(&hdev->rx_task);
501 tasklet_kill(&hdev->tx_task);
502 tasklet_kill(&hdev->cmd_task);
503
504 skb_queue_purge(&hdev->cmd_q);
505 skb_queue_purge(&hdev->rx_q);
506
507 if (hdev->flush)
508 hdev->flush(hdev);
509
510 if (hdev->sent_cmd) {
511 kfree_skb(hdev->sent_cmd);
512 hdev->sent_cmd = NULL;
513 }
514
515 hdev->close(hdev);
516 hdev->flags = 0;
517 }
518
519done:
520 hci_req_unlock(hdev);
521 hci_dev_put(hdev);
522 return ret;
523}
524
525static int hci_dev_do_close(struct hci_dev *hdev)
526{
527 BT_DBG("%s %p", hdev->name, hdev);
528
529 hci_req_cancel(hdev, ENODEV);
530 hci_req_lock(hdev);
531
532 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
533 hci_req_unlock(hdev);
534 return 0;
535 }
536
537 /* Kill RX and TX tasks */
538 tasklet_kill(&hdev->rx_task);
539 tasklet_kill(&hdev->tx_task);
540
541 hci_dev_lock_bh(hdev);
542 inquiry_cache_flush(hdev);
543 hci_conn_hash_flush(hdev);
544 hci_dev_unlock_bh(hdev);
545
546 hci_notify(hdev, HCI_DEV_DOWN);
547
548 if (hdev->flush)
549 hdev->flush(hdev);
550
551 /* Reset device */
552 skb_queue_purge(&hdev->cmd_q);
553 atomic_set(&hdev->cmd_cnt, 1);
554 if (!test_bit(HCI_RAW, &hdev->flags)) {
555 set_bit(HCI_INIT, &hdev->flags);
556 __hci_request(hdev, hci_reset_req, 0,
557 msecs_to_jiffies(250));
558 clear_bit(HCI_INIT, &hdev->flags);
559 }
560
561 /* Kill cmd task */
562 tasklet_kill(&hdev->cmd_task);
563
564 /* Drop queues */
565 skb_queue_purge(&hdev->rx_q);
566 skb_queue_purge(&hdev->cmd_q);
567 skb_queue_purge(&hdev->raw_q);
568
569 /* Drop last sent command */
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
573 }
574
575 /* After this point our queues are empty
576 * and no tasks are scheduled. */
577 hdev->close(hdev);
578
579 /* Clear flags */
580 hdev->flags = 0;
581
582 hci_req_unlock(hdev);
583
584 hci_dev_put(hdev);
585 return 0;
586}
587
588int hci_dev_close(__u16 dev)
589{
590 struct hci_dev *hdev;
591 int err;
592
593 if (!(hdev = hci_dev_get(dev)))
594 return -ENODEV;
595 err = hci_dev_do_close(hdev);
596 hci_dev_put(hdev);
597 return err;
598}
599
600int hci_dev_reset(__u16 dev)
601{
602 struct hci_dev *hdev;
603 int ret = 0;
604
605 if (!(hdev = hci_dev_get(dev)))
606 return -ENODEV;
607
608 hci_req_lock(hdev);
609 tasklet_disable(&hdev->tx_task);
610
611 if (!test_bit(HCI_UP, &hdev->flags))
612 goto done;
613
614 /* Drop queues */
615 skb_queue_purge(&hdev->rx_q);
616 skb_queue_purge(&hdev->cmd_q);
617
618 hci_dev_lock_bh(hdev);
619 inquiry_cache_flush(hdev);
620 hci_conn_hash_flush(hdev);
621 hci_dev_unlock_bh(hdev);
622
623 if (hdev->flush)
624 hdev->flush(hdev);
625
626 atomic_set(&hdev->cmd_cnt, 1);
627 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
628
629 if (!test_bit(HCI_RAW, &hdev->flags))
630 ret = __hci_request(hdev, hci_reset_req, 0,
631 msecs_to_jiffies(HCI_INIT_TIMEOUT));
632
633done:
634 tasklet_enable(&hdev->tx_task);
635 hci_req_unlock(hdev);
636 hci_dev_put(hdev);
637 return ret;
638}
639
640int hci_dev_reset_stat(__u16 dev)
641{
642 struct hci_dev *hdev;
643 int ret = 0;
644
645 if (!(hdev = hci_dev_get(dev)))
646 return -ENODEV;
647
648 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
649
650 hci_dev_put(hdev);
651
652 return ret;
653}
654
655int hci_dev_cmd(unsigned int cmd, void __user *arg)
656{
657 struct hci_dev *hdev;
658 struct hci_dev_req dr;
659 int err = 0;
660
661 if (copy_from_user(&dr, arg, sizeof(dr)))
662 return -EFAULT;
663
664 if (!(hdev = hci_dev_get(dr.dev_id)))
665 return -ENODEV;
666
667 switch (cmd) {
668 case HCISETAUTH:
669 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
670 msecs_to_jiffies(HCI_INIT_TIMEOUT));
671 break;
672
673 case HCISETENCRYPT:
674 if (!lmp_encrypt_capable(hdev)) {
675 err = -EOPNOTSUPP;
676 break;
677 }
678
679 if (!test_bit(HCI_AUTH, &hdev->flags)) {
680 /* Auth must be enabled first */
681 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
682 msecs_to_jiffies(HCI_INIT_TIMEOUT));
683 if (err)
684 break;
685 }
686
687 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
688 msecs_to_jiffies(HCI_INIT_TIMEOUT));
689 break;
690
691 case HCISETSCAN:
692 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694 break;
695
696 case HCISETPTYPE:
697 hdev->pkt_type = (__u16) dr.dev_opt;
698 break;
699
700 case HCISETLINKPOL:
701 hdev->link_policy = (__u16) dr.dev_opt;
702 break;
703
704 case HCISETLINKMODE:
705 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
706 break;
707
708 case HCISETACLMTU:
709 hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
710 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
711 break;
712
713 case HCISETSCOMTU:
714 hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
715 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
716 break;
717
718 default:
719 err = -EINVAL;
720 break;
721 }
722 hci_dev_put(hdev);
723 return err;
724}
725
726int hci_get_dev_list(void __user *arg)
727{
728 struct hci_dev_list_req *dl;
729 struct hci_dev_req *dr;
730 struct list_head *p;
731 int n = 0, size, err;
732 __u16 dev_num;
733
734 if (get_user(dev_num, (__u16 __user *) arg))
735 return -EFAULT;
736
737 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
738 return -EINVAL;
739
740 size = sizeof(*dl) + dev_num * sizeof(*dr);
741
742 if (!(dl = kmalloc(size, GFP_KERNEL)))
743 return -ENOMEM;
744
745 dr = dl->dev_req;
746
747 read_lock_bh(&hci_dev_list_lock);
748 list_for_each(p, &hci_dev_list) {
749 struct hci_dev *hdev;
750 hdev = list_entry(p, struct hci_dev, list);
751 (dr + n)->dev_id = hdev->id;
752 (dr + n)->dev_opt = hdev->flags;
753 if (++n >= dev_num)
754 break;
755 }
756 read_unlock_bh(&hci_dev_list_lock);
757
758 dl->dev_num = n;
759 size = sizeof(*dl) + n * sizeof(*dr);
760
761 err = copy_to_user(arg, dl, size);
762 kfree(dl);
763
764 return err ? -EFAULT : 0;
765}
766
767int hci_get_dev_info(void __user *arg)
768{
769 struct hci_dev *hdev;
770 struct hci_dev_info di;
771 int err = 0;
772
773 if (copy_from_user(&di, arg, sizeof(di)))
774 return -EFAULT;
775
776 if (!(hdev = hci_dev_get(di.dev_id)))
777 return -ENODEV;
778
779 strcpy(di.name, hdev->name);
780 di.bdaddr = hdev->bdaddr;
781 di.type = hdev->type;
782 di.flags = hdev->flags;
783 di.pkt_type = hdev->pkt_type;
784 di.acl_mtu = hdev->acl_mtu;
785 di.acl_pkts = hdev->acl_pkts;
786 di.sco_mtu = hdev->sco_mtu;
787 di.sco_pkts = hdev->sco_pkts;
788 di.link_policy = hdev->link_policy;
789 di.link_mode = hdev->link_mode;
790
791 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
792 memcpy(&di.features, &hdev->features, sizeof(di.features));
793
794 if (copy_to_user(arg, &di, sizeof(di)))
795 err = -EFAULT;
796
797 hci_dev_put(hdev);
798
799 return err;
800}
801
802/* ---- Interface to HCI drivers ---- */
803
804/* Alloc HCI device */
805struct hci_dev *hci_alloc_dev(void)
806{
807 struct hci_dev *hdev;
808
809 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
810 if (!hdev)
811 return NULL;
812
813 skb_queue_head_init(&hdev->driver_init);
814
815 return hdev;
816}
817EXPORT_SYMBOL(hci_alloc_dev);
818
819/* Free HCI device */
820void hci_free_dev(struct hci_dev *hdev)
821{
822 skb_queue_purge(&hdev->driver_init);
823
824 /* will free via device release */
825 put_device(&hdev->dev);
826}
827EXPORT_SYMBOL(hci_free_dev);
828
829/* Register HCI device */
830int hci_register_dev(struct hci_dev *hdev)
831{
832 struct list_head *head = &hci_dev_list, *p;
833 int i, id = 0;
834
835 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
836
837 if (!hdev->open || !hdev->close || !hdev->destruct)
838 return -EINVAL;
839
840 write_lock_bh(&hci_dev_list_lock);
841
842 /* Find first available device id */
843 list_for_each(p, &hci_dev_list) {
844 if (list_entry(p, struct hci_dev, list)->id != id)
845 break;
846 head = p; id++;
847 }
848
849 sprintf(hdev->name, "hci%d", id);
850 hdev->id = id;
851 list_add(&hdev->list, head);
852
853 atomic_set(&hdev->refcnt, 1);
854 spin_lock_init(&hdev->lock);
855
856 hdev->flags = 0;
857 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
858 hdev->esco_type = (ESCO_HV1);
859 hdev->link_mode = (HCI_LM_ACCEPT);
860
861 hdev->idle_timeout = 0;
862 hdev->sniff_max_interval = 800;
863 hdev->sniff_min_interval = 80;
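	/* sniff intervals are in 0.625 ms slots: 800 slots = 500 ms, 80 slots = 50 ms */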
864
865 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
866 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
867 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
868
869 skb_queue_head_init(&hdev->rx_q);
870 skb_queue_head_init(&hdev->cmd_q);
871 skb_queue_head_init(&hdev->raw_q);
872
873 for (i = 0; i < 3; i++)
874 hdev->reassembly[i] = NULL;
875
876 init_waitqueue_head(&hdev->req_wait_q);
877 init_MUTEX(&hdev->req_lock);
878
879 inquiry_cache_init(hdev);
880
881 hci_conn_hash_init(hdev);
882
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
884
885 atomic_set(&hdev->promisc, 0);
886
887 write_unlock_bh(&hci_dev_list_lock);
888
889 hci_register_sysfs(hdev);
890
891 hci_notify(hdev, HCI_DEV_REG);
892
893 return id;
894}
895EXPORT_SYMBOL(hci_register_dev);
896
897/* Unregister HCI device */
898int hci_unregister_dev(struct hci_dev *hdev)
899{
900 int i;
901
902 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
903
904 hci_unregister_sysfs(hdev);
905
906 write_lock_bh(&hci_dev_list_lock);
907 list_del(&hdev->list);
908 write_unlock_bh(&hci_dev_list_lock);
909
910 hci_dev_do_close(hdev);
911
912 for (i = 0; i < 3; i++)
913 kfree_skb(hdev->reassembly[i]);
914
915 hci_notify(hdev, HCI_DEV_UNREG);
916
917 __hci_dev_put(hdev);
918
919 return 0;
920}
921EXPORT_SYMBOL(hci_unregister_dev);
922
923/* Suspend HCI device */
924int hci_suspend_dev(struct hci_dev *hdev)
925{
926 hci_notify(hdev, HCI_DEV_SUSPEND);
927 return 0;
928}
929EXPORT_SYMBOL(hci_suspend_dev);
930
931/* Resume HCI device */
932int hci_resume_dev(struct hci_dev *hdev)
933{
934 hci_notify(hdev, HCI_DEV_RESUME);
935 return 0;
936}
937EXPORT_SYMBOL(hci_resume_dev);
938
939/* Receive packet type fragment */
940#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
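/* H4 packet type indicators run from HCI_COMMAND_PKT (0x01) to HCI_EVENT_PKT
 * (0x04), so (type) - 2 maps ACL, SCO and event packets onto the three
 * reassembly slots set up in hci_register_dev(). Note that the first chunk
 * passed in must contain the complete packet header; only the payload may be
 * split across subsequent calls. */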
941
942int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
943{
944 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
945 return -EILSEQ;
946
947 while (count) {
948 struct sk_buff *skb = __reassembly(hdev, type);
949 struct { int expect; } *scb;
950 int len = 0;
951
952 if (!skb) {
953 /* Start of the frame */
954
955 switch (type) {
956 case HCI_EVENT_PKT:
957 if (count >= HCI_EVENT_HDR_SIZE) {
958 struct hci_event_hdr *h = data;
959 len = HCI_EVENT_HDR_SIZE + h->plen;
960 } else
961 return -EILSEQ;
962 break;
963
964 case HCI_ACLDATA_PKT:
965 if (count >= HCI_ACL_HDR_SIZE) {
966 struct hci_acl_hdr *h = data;
967 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
968 } else
969 return -EILSEQ;
970 break;
971
972 case HCI_SCODATA_PKT:
973 if (count >= HCI_SCO_HDR_SIZE) {
974 struct hci_sco_hdr *h = data;
975 len = HCI_SCO_HDR_SIZE + h->dlen;
976 } else
977 return -EILSEQ;
978 break;
979 }
980
981 skb = bt_skb_alloc(len, GFP_ATOMIC);
982 if (!skb) {
983 BT_ERR("%s no memory for packet", hdev->name);
984 return -ENOMEM;
985 }
986
987 skb->dev = (void *) hdev;
988 bt_cb(skb)->pkt_type = type;
989
990 __reassembly(hdev, type) = skb;
991
992 scb = (void *) skb->cb;
993 scb->expect = len;
994 } else {
995 /* Continuation */
996
997 scb = (void *) skb->cb;
998 len = scb->expect;
999 }
1000
1001 len = min(len, count);
1002
1003 memcpy(skb_put(skb, len), data, len);
1004
1005 scb->expect -= len;
1006
1007 if (scb->expect == 0) {
1008 /* Complete frame */
1009
1010 __reassembly(hdev, type) = NULL;
1011
1012 bt_cb(skb)->pkt_type = type;
1013 hci_recv_frame(skb);
1014 }
1015
1016 count -= len; data += len;
1017 }
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(hci_recv_fragment);
1022
1023/* ---- Interface to upper protocols ---- */
1024
1025/* Register/Unregister protocols.
1026 * hci_task_lock is used to ensure that no tasks are running. */
1027int hci_register_proto(struct hci_proto *hp)
1028{
1029 int err = 0;
1030
1031 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1032
1033 if (hp->id >= HCI_MAX_PROTO)
1034 return -EINVAL;
1035
1036 write_lock_bh(&hci_task_lock);
1037
1038 if (!hci_proto[hp->id])
1039 hci_proto[hp->id] = hp;
1040 else
1041 err = -EEXIST;
1042
1043 write_unlock_bh(&hci_task_lock);
1044
1045 return err;
1046}
1047EXPORT_SYMBOL(hci_register_proto);
1048
1049int hci_unregister_proto(struct hci_proto *hp)
1050{
1051 int err = 0;
1052
1053 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1054
1055 if (hp->id >= HCI_MAX_PROTO)
1056 return -EINVAL;
1057
1058 write_lock_bh(&hci_task_lock);
1059
1060 if (hci_proto[hp->id])
1061 hci_proto[hp->id] = NULL;
1062 else
1063 err = -ENOENT;
1064
1065 write_unlock_bh(&hci_task_lock);
1066
1067 return err;
1068}
1069EXPORT_SYMBOL(hci_unregister_proto);
1070
1071int hci_register_cb(struct hci_cb *cb)
1072{
1073 BT_DBG("%p name %s", cb, cb->name);
1074
1075 write_lock_bh(&hci_cb_list_lock);
1076 list_add(&cb->list, &hci_cb_list);
1077 write_unlock_bh(&hci_cb_list_lock);
1078
1079 return 0;
1080}
1081EXPORT_SYMBOL(hci_register_cb);
1082
1083int hci_unregister_cb(struct hci_cb *cb)
1084{
1085 BT_DBG("%p name %s", cb, cb->name);
1086
1087 write_lock_bh(&hci_cb_list_lock);
1088 list_del(&cb->list);
1089 write_unlock_bh(&hci_cb_list_lock);
1090
1091 return 0;
1092}
1093EXPORT_SYMBOL(hci_unregister_cb);
1094
1095static int hci_send_frame(struct sk_buff *skb)
1096{
1097 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1098
1099 if (!hdev) {
1100 kfree_skb(skb);
1101 return -ENODEV;
1102 }
1103
1104 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1105
1106 if (atomic_read(&hdev->promisc)) {
1107 /* Time stamp */
1108 __net_timestamp(skb);
1109
1110 hci_send_to_sock(hdev, skb);
1111 }
1112
1113 /* Get rid of skb owner, prior to sending to the driver. */
1114 skb_orphan(skb);
1115
1116 return hdev->send(skb);
1117}
1118
1119/* Send HCI command */
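/* As of this change the caller passes one 16-bit opcode (OGF in the upper
 * 6 bits, OCF in the lower 10) instead of separate ogf/ocf arguments, e.g.
 * HCI_OP_RESET == 0x0c03. Helpers along the lines of hci_opcode_ogf() and
 * hci_opcode_ocf() in hci.h (assumed names) can split the value again where
 * the group field is still needed. */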
1120int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1121{
1122 int len = HCI_COMMAND_HDR_SIZE + plen;
1123 struct hci_command_hdr *hdr;
1124 struct sk_buff *skb;
1125
1126 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1127
1128 skb = bt_skb_alloc(len, GFP_ATOMIC);
1129 if (!skb) {
1130 BT_ERR("%s no memory for command", hdev->name);
1131 return -ENOMEM;
1132 }
1133
1134 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1135 hdr->opcode = cpu_to_le16(opcode);
1136 hdr->plen = plen;
1137
1138 if (plen)
1139 memcpy(skb_put(skb, plen), param, plen);
1140
1141 BT_DBG("skb len %d", skb->len);
1142
1143 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1144 skb->dev = (void *) hdev;
1145 skb_queue_tail(&hdev->cmd_q, skb);
1146 hci_sched_cmd(hdev);
1147
1148 return 0;
1149}
1150
1151/* Get data from the previously sent command */
1152void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1153{
1154 struct hci_command_hdr *hdr;
1155
1156 if (!hdev->sent_cmd)
1157 return NULL;
1158
1159 hdr = (void *) hdev->sent_cmd->data;
1160
1161 if (hdr->opcode != cpu_to_le16(opcode))
1162 return NULL;
1163
1164 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1165
1166 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1167}
1168
1169/* Send ACL data */
1170static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1171{
1172 struct hci_acl_hdr *hdr;
1173 int len = skb->len;
1174
1175 skb_push(skb, HCI_ACL_HDR_SIZE);
1176 skb_reset_transport_header(skb);
1177 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1178 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1179 hdr->dlen = cpu_to_le16(len);
1180}
1181
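/* hci_send_acl() tags the first buffer with ACL_START and every fragment on
 * its frag_list with ACL_CONT, then queues them atomically on the connection's
 * data_q for the TX task to hand to the driver. */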
1182int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1183{
1184 struct hci_dev *hdev = conn->hdev;
1185 struct sk_buff *list;
1186
1187 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1188
1189 skb->dev = (void *) hdev;
1190 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1191 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1192
1193 if (!(list = skb_shinfo(skb)->frag_list)) {
1194 /* Non fragmented */
1195 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1196
1197 skb_queue_tail(&conn->data_q, skb);
1198 } else {
1199 /* Fragmented */
1200 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1201
1202 skb_shinfo(skb)->frag_list = NULL;
1203
1204 /* Queue all fragments atomically */
1205 spin_lock_bh(&conn->data_q.lock);
1206
1207 __skb_queue_tail(&conn->data_q, skb);
1208 do {
1209 skb = list; list = list->next;
1210
1211 skb->dev = (void *) hdev;
1212 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1213 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1214
1215 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1216
1217 __skb_queue_tail(&conn->data_q, skb);
1218 } while (list);
1219
1220 spin_unlock_bh(&conn->data_q.lock);
1221 }
1222
1223 hci_sched_tx(hdev);
1224 return 0;
1225}
1226EXPORT_SYMBOL(hci_send_acl);
1227
1228/* Send SCO data */
1229int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1230{
1231 struct hci_dev *hdev = conn->hdev;
1232 struct hci_sco_hdr hdr;
1233
1234 BT_DBG("%s len %d", hdev->name, skb->len);
1235
1236 if (skb->len > hdev->sco_mtu) {
1237 kfree_skb(skb);
1238 return -EINVAL;
1239 }
1240
1241 hdr.handle = cpu_to_le16(conn->handle);
1242 hdr.dlen = skb->len;
1243
1244 skb_push(skb, HCI_SCO_HDR_SIZE);
1245 skb_reset_transport_header(skb);
1246 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1247
1248 skb->dev = (void *) hdev;
1249 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1250 skb_queue_tail(&conn->data_q, skb);
1251 hci_sched_tx(hdev);
1252 return 0;
1253}
1254EXPORT_SYMBOL(hci_send_sco);
1255
1256/* ---- HCI TX task (outgoing data) ---- */
1257
1258/* HCI Connection scheduler */
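/* hci_low_sent() picks the connection of the requested type with the fewest
 * outstanding packets and grants it a quota of roughly the free controller
 * buffers divided by the number of connections that have data queued. */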
1259static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1260{
1261 struct hci_conn_hash *h = &hdev->conn_hash;
1262 struct hci_conn *conn = NULL;
1263 int num = 0, min = ~0;
1264 struct list_head *p;
1265
1266 /* We don't have to lock device here. Connections are always
1267 * added and removed with TX task disabled. */
1268 list_for_each(p, &h->list) {
1269 struct hci_conn *c;
1270 c = list_entry(p, struct hci_conn, list);
1271
1272 if (c->type != type || c->state != BT_CONNECTED
1273 || skb_queue_empty(&c->data_q))
1274 continue;
1275 num++;
1276
1277 if (c->sent < min) {
1278 min = c->sent;
1279 conn = c;
1280 }
1281 }
1282
1283 if (conn) {
1284 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1285 int q = cnt / num;
1286 *quote = q ? q : 1;
1287 } else
1288 *quote = 0;
1289
1290 BT_DBG("conn %p quote %d", conn, *quote);
1291 return conn;
1292}
1293
1294static inline void hci_acl_tx_to(struct hci_dev *hdev)
1295{
1296 struct hci_conn_hash *h = &hdev->conn_hash;
1297 struct list_head *p;
1298 struct hci_conn *c;
1299
1300 BT_ERR("%s ACL tx timeout", hdev->name);
1301
1302 /* Kill stalled connections */
1303 list_for_each(p, &h->list) {
1304 c = list_entry(p, struct hci_conn, list);
1305 if (c->type == ACL_LINK && c->sent) {
1306 BT_ERR("%s killing stalled ACL connection %s",
1307 hdev->name, batostr(&c->dst));
1308 hci_acl_disconn(c, 0x13); /* 0x13: remote user terminated connection */
1309 }
1310 }
1311}
1312
1313static inline void hci_sched_acl(struct hci_dev *hdev)
1314{
1315 struct hci_conn *conn;
1316 struct sk_buff *skb;
1317 int quote;
1318
1319 BT_DBG("%s", hdev->name);
1320
1321 if (!test_bit(HCI_RAW, &hdev->flags)) {
1322 /* ACL tx timeout must be longer than maximum
1323 * link supervision timeout (40.9 seconds) */
1324 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1325 hci_acl_tx_to(hdev);
1326 }
1327
1328 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1329 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1330 BT_DBG("skb %p len %d", skb, skb->len);
1331
1332 hci_conn_enter_active_mode(conn);
1333
1334 hci_send_frame(skb);
1335 hdev->acl_last_tx = jiffies;
1336
1337 hdev->acl_cnt--;
1338 conn->sent++;
1339 }
1340 }
1341}
1342
1343/* Schedule SCO */
1344static inline void hci_sched_sco(struct hci_dev *hdev)
1345{
1346 struct hci_conn *conn;
1347 struct sk_buff *skb;
1348 int quote;
1349
1350 BT_DBG("%s", hdev->name);
1351
1352 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1353 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1354 BT_DBG("skb %p len %d", skb, skb->len);
1355 hci_send_frame(skb);
1356
1357 conn->sent++;
1358 if (conn->sent == ~0)
1359 conn->sent = 0;
1360 }
1361 }
1362}
1363
1364static void hci_tx_task(unsigned long arg)
1365{
1366 struct hci_dev *hdev = (struct hci_dev *) arg;
1367 struct sk_buff *skb;
1368
1369 read_lock(&hci_task_lock);
1370
1371 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1372
1373 /* Schedule queues and send stuff to HCI driver */
1374
1375 hci_sched_acl(hdev);
1376
1377 hci_sched_sco(hdev);
1378
1379 /* Send next queued raw (unknown type) packet */
1380 while ((skb = skb_dequeue(&hdev->raw_q)))
1381 hci_send_frame(skb);
1382
1383 read_unlock(&hci_task_lock);
1384}
1385
1386/* ----- HCI RX task (incoming data processing) ----- */
1387
1388/* ACL data packet */
1389static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1390{
1391 struct hci_acl_hdr *hdr = (void *) skb->data;
1392 struct hci_conn *conn;
1393 __u16 handle, flags;
1394
1395 skb_pull(skb, HCI_ACL_HDR_SIZE);
1396
1397 handle = __le16_to_cpu(hdr->handle);
1398 flags = hci_flags(handle);
1399 handle = hci_handle(handle);
1400
1401 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1402
1403 hdev->stat.acl_rx++;
1404
1405 hci_dev_lock(hdev);
1406 conn = hci_conn_hash_lookup_handle(hdev, handle);
1407 hci_dev_unlock(hdev);
1408
1409 if (conn) {
1410 register struct hci_proto *hp;
1411
1412 hci_conn_enter_active_mode(conn);
1413
1414 /* Send to upper protocol */
1415 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1416 hp->recv_acldata(conn, skb, flags);
1417 return;
1418 }
1419 } else {
1420 BT_ERR("%s ACL packet for unknown connection handle %d",
1421 hdev->name, handle);
1422 }
1423
1424 kfree_skb(skb);
1425}
1426
1427/* SCO data packet */
1428static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1429{
1430 struct hci_sco_hdr *hdr = (void *) skb->data;
1431 struct hci_conn *conn;
1432 __u16 handle;
1433
1434 skb_pull(skb, HCI_SCO_HDR_SIZE);
1435
1436 handle = __le16_to_cpu(hdr->handle);
1437
1438 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1439
1440 hdev->stat.sco_rx++;
1441
1442 hci_dev_lock(hdev);
1443 conn = hci_conn_hash_lookup_handle(hdev, handle);
1444 hci_dev_unlock(hdev);
1445
1446 if (conn) {
1447 register struct hci_proto *hp;
1448
1449 /* Send to upper protocol */
1450 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1451 hp->recv_scodata(conn, skb);
1452 return;
1453 }
1454 } else {
1455 BT_ERR("%s SCO packet for unknown connection handle %d",
1456 hdev->name, handle);
1457 }
1458
1459 kfree_skb(skb);
1460}
1461
1462static void hci_rx_task(unsigned long arg)
1463{
1464 struct hci_dev *hdev = (struct hci_dev *) arg;
1465 struct sk_buff *skb;
1466
1467 BT_DBG("%s", hdev->name);
1468
1469 read_lock(&hci_task_lock);
1470
1471 while ((skb = skb_dequeue(&hdev->rx_q))) {
1472 if (atomic_read(&hdev->promisc)) {
1473 /* Send copy to the sockets */
1474 hci_send_to_sock(hdev, skb);
1475 }
1476
1477 if (test_bit(HCI_RAW, &hdev->flags)) {
1478 kfree_skb(skb);
1479 continue;
1480 }
1481
1482 if (test_bit(HCI_INIT, &hdev->flags)) {
1483 /* Don't process data packets in this state. */
1484 switch (bt_cb(skb)->pkt_type) {
1485 case HCI_ACLDATA_PKT:
1486 case HCI_SCODATA_PKT:
1487 kfree_skb(skb);
1488 continue;
1489 }
1490 }
1491
1492 /* Process frame */
1493 switch (bt_cb(skb)->pkt_type) {
1494 case HCI_EVENT_PKT:
1495 hci_event_packet(hdev, skb);
1496 break;
1497
1498 case HCI_ACLDATA_PKT:
1499 BT_DBG("%s ACL data packet", hdev->name);
1500 hci_acldata_packet(hdev, skb);
1501 break;
1502
1503 case HCI_SCODATA_PKT:
1504 BT_DBG("%s SCO data packet", hdev->name);
1505 hci_scodata_packet(hdev, skb);
1506 break;
1507
1508 default:
1509 kfree_skb(skb);
1510 break;
1511 }
1512 }
1513
1514 read_unlock(&hci_task_lock);
1515}
1516
1517static void hci_cmd_task(unsigned long arg)
1518{
1519 struct hci_dev *hdev = (struct hci_dev *) arg;
1520 struct sk_buff *skb;
1521
1522 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1523
1524 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1525 BT_ERR("%s command tx timeout", hdev->name);
1526 atomic_set(&hdev->cmd_cnt, 1);
1527 }
1528
1529 /* Send queued commands */
1530 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1531 if (hdev->sent_cmd)
1532 kfree_skb(hdev->sent_cmd);
1533
1534 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1535 atomic_dec(&hdev->cmd_cnt);
1536 hci_send_frame(skb);
1537 hdev->cmd_last_tx = jiffies;
1538 } else {
1539 skb_queue_head(&hdev->cmd_q, skb);
1540 hci_sched_cmd(hdev);
1541 }
1542 }
1543}