/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

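/*
 * The helpers below implement a simple synchronous request pattern:
 * the caller marks req_status HCI_REQ_PEND, invokes a request callback
 * that queues one or more HCI commands, and sleeps on req_wait_q until
 * hci_req_complete() or hci_req_cancel() wakes it up, or the timeout
 * expires.
 */
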
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

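/*
 * Note: while HCI_INIT is set, hci_send_cmd() records every opcode in
 * hdev->init_last_cmd, so hci_req_complete() only finishes the init
 * request once the final command queued by hci_init_req() completes.
 */
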
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

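/*
 * The inquiry cache is a singly linked list of inquiry_entry structs
 * keyed by bdaddr. Entries are timestamped on update; hci_inquiry()
 * below flushes the whole list when it is older than
 * INQUIRY_CACHE_AGE_MAX or when userspace requests IREQ_CACHE_FLUSH.
 */
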
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

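/*
 * Teardown mirrors hci_dev_open() in reverse: cancel any pending
 * request, kill the RX/TX tasklets, flush connection state, issue a
 * final HCI reset (unless the device is raw), then drop all queues and
 * close the driver.
 */
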
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Stop timer, it might be running */
	del_timer_sync(&hdev->cmd_timer);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

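/*
 * Command watchdog: hci_cmd_task() arms cmd_timer whenever a command is
 * handed to the driver. If no completion event restores the credit
 * before HCI_CMD_TIMEOUT (event handling lives in hci_event.c), the
 * handler below forces cmd_cnt back to 1 so the command queue cannot
 * stall forever.
 */
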
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

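/*
 * hci_reassembly() rebuilds one HCI packet from an arbitrary byte
 * stream: scb->expect tracks how many bytes are still missing (first
 * the header, then the payload length read from that header). It
 * returns the number of input bytes left over after a complete packet
 * has been pushed up via hci_recv_frame(), or a negative error.
 */
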
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

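/*
 * Stream transports (e.g. UART/H4 style drivers) prefix each packet
 * with a packet type byte. When no reassembly is in progress the first
 * byte selects the type; the remainder is fed to hci_reassembly()
 * through the single STREAM_REASSEMBLY slot.
 */
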
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

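/*
 * For fragmented ACL frames the driver sees one skb per fragment: the
 * head keeps the caller's ACL_START flag while every skb taken off
 * frag_list is re-marked ACL_CONT, and all of them are queued under the
 * queue lock so fragments of different frames cannot interleave.
 */
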
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

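/*
 * Scheduling is credit based: per link type, hci_low_sent() picks the
 * connection with the fewest unacknowledged packets (c->sent) and
 * grants it a quota of cnt/num packets (at least one), where cnt is the
 * free controller buffer count and num the number of ready connections.
 */
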
/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

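/*
 * Commands are flow controlled by cmd_cnt (credits granted by the
 * controller). A clone of each outgoing command is kept in sent_cmd so
 * hci_sent_cmd_data() can match replies against it, and cmd_timer is
 * armed as a watchdog for the response.
 */
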
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}