Bluetooth: Modified hci_recv_fragment() to use hci_reassembly helper
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
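
/*
 * Example (hypothetical caller, for illustration only; my_scan_req is
 * a made-up name): a synchronous request pairs a small "req" callback
 * that queues HCI commands with a timeout; hci_req_complete() later
 * wakes the waiter when the matching command completes.
 *
 *	static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, my_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */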

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
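
/*
 * Example (hypothetical userspace caller, for illustration only; the
 * field values are arbitrary): hci_inquiry() above backs the
 * HCIINQUIRY ioctl on a raw HCI socket, roughly the way the BlueZ
 * library drives it: an hci_inquiry_req header followed by a buffer
 * for up to num_rsp inquiry_info entries.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			.flags = IREQ_CACHE_FLUSH,
 *			.lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */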

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_blacklist_clear(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist.list);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
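
/*
 * Example (hypothetical transport driver, for illustration only;
 * my_open, my_close, my_send and my_destruct are made-up names): a
 * minimal driver allocates an hci_dev, fills in the mandatory hooks
 * checked above, and registers it:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_UART;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	hdev->owner    = THIS_MODULE;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */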

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
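
/*
 * Note on hci_reassembly() semantics: the helper consumes at most one
 * complete packet per call and returns the number of unconsumed input
 * bytes, or a negative errno. Worked example (hypothetical byte
 * stream): an HCI event is 2 header bytes (event code, plen) plus
 * plen parameter bytes. Feeding the 6-byte chunk
 * { 0x0e, 0x03, aa, bb, cc, 0x0f } first fills the 2-byte header,
 * learns plen == 3, completes the 5-byte Command Complete event via
 * hci_recv_frame(), and returns 1; the caller then passes the
 * remaining byte { 0x0f } back in as the start of the next event.
 */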

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	do {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	} while (count);

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
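
/*
 * Example (hypothetical UART-style driver, for illustration only; buf
 * and len are made-up names): a driver that knows the packet type can
 * feed raw chunks straight from its receive path:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
 *
 * The per-type reassembly slot (index type - 1) keeps partial frames
 * between calls, so chunks may split packets at arbitrary byte
 * boundaries; the loop above simply re-feeds any leftover bytes.
 */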

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
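
/*
 * Note on fragmentation (summary of the logic above): when the upper
 * layer hands down an skb whose frag_list carries continuation
 * fragments, the head is tagged ACL_START and each fragment ACL_CONT,
 * and the whole chain is queued under the data_q lock so the
 * controller never sees fragments from two writers interleaved on the
 * same connection.
 */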

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
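
/*
 * Worked example (hypothetical numbers): with hdev->acl_cnt == 8 and
 * three ACL connections holding queued data, the least-recently-busy
 * connection (smallest c->sent) is picked and granted a quote of
 * 8 / 3 == 2 packets, so one chatty link cannot starve the others;
 * a quote of at least 1 is always granted when a candidate exists.
 */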

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}