Bluetooth: Add controller types for BR/EDR and 802.11 AMP

net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
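
/*
 * Usage sketch (illustrative, not part of the original file): callers wrap
 * an HCI command sequence in a request callback and block until
 * hci_req_complete() fires or the timeout expires, roughly:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The opt argument is passed through to the callback unchanged, so it can
 * carry a flag byte or a pointer cast to unsigned long (see hci_inq_req).
 */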

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
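
/*
 * Timing note (added for clarity): both timeouts above are programmed in
 * 0.625 ms baseband slots, so 0x8000 = 32768 slots = 20.48 s and
 * 0x7d00 = 32000 slots = 20.0 s, matching the "~20 secs" comments.
 */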

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
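
/*
 * Producer side, for orientation only (it lives in hci_event.c, sketched
 * here): the inquiry result event handlers fill a struct inquiry_data and
 * refresh the cache under the device lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_inquiry_cache_update(hdev, &data);
 *	hci_dev_unlock(hdev);
 */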

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so we allocate a temporary buffer and then
	 * copy it to user space. */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
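
/*
 * Userspace view (hypothetical caller, shown as a sketch): hci_inquiry()
 * backs the HCIINQUIRY ioctl, whose argument is a struct hci_inquiry_req
 * immediately followed by room for num_rsp inquiry_info records:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 255 } };
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * (0x9e8b33 is the general inquiry access code; hci_sock_fd is an open
 * HCI socket.)
 */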

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
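
/*
 * Note on HCISETACLMTU/HCISETSCOMTU above: dev_opt packs two 16-bit
 * values, the packet count in the first __u16 and the MTU in the second,
 * so on a little-endian host a caller would build it as (sketch):
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 */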

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
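
/*
 * di.type above carries both identifiers introduced by this patch: the
 * low nibble is the transport bus and the upper bits the controller
 * type, so userspace can split it as (sketch):
 *
 *	bus  = di.type & 0x0f;
 *	type = di.type >> 4;
 *
 * where type is HCI_BREDR or HCI_AMP.
 */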

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
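
/*
 * Driver usage sketch (callback names illustrative): a transport driver
 * allocates a device, fills in its mandatory methods and registers it:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */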

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
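
/*
 * Caller-side sketch (names illustrative): a byte-stream transport such
 * as a UART line discipline parses the H4 packet-type byte itself and
 * feeds the remaining bytes here chunk by chunk:
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("%s frame reassembly failed", hdev->name);
 */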

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
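
/*
 * Usage sketch: L2CAP and SCO each register one struct hci_proto at
 * module init; a trimmed-down version of the L2CAP instance looks like:
 *
 *	static struct hci_proto hci_l2cap = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.recv_acldata	= l2cap_recv_acldata,
 *	};
 *	hci_register_proto(&hci_l2cap);
 */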

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
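
/*
 * Usage sketch: callers pass the opcode plus an optional parameter block;
 * e.g. the write-scan-enable command issued by hci_scan_req() above:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */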

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
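
/*
 * Layout note: the 16-bit ACL "handle" field carries the 12-bit
 * connection handle plus the 2-bit packet-boundary and broadcast flags;
 * hci_handle_pack() (hci.h) combines them as
 *
 *	(handle & 0x0fff) | (flags << 12)
 */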

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
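
/*
 * Worked quota example: with three ACL connections holding queued data
 * and hdev->acl_cnt == 8 free controller buffers, the least-busy
 * connection is picked and granted 8 / 3 = 2 frames before the scheduler
 * looks again; the quote never drops below 1, so a ready connection is
 * never starved outright.
 */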

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}