/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
1da177e4
LT
44#include <net/sock.h>
45
46#include <asm/system.h>
70f23020 47#include <linux/uaccess.h>
1da177e4
LT
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
/* Delay (ms) before an auto-powered-on controller is powered off again
 * if nothing claims it (see hci_power_on()/hci_auto_off()). */
#define AUTO_OFF_TIMEOUT	2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Guards the rx/tx/cmd tasklets against concurrent protocol changes */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (e.g. L2CAP, SCO) registered at runtime */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */
/* Subscribe @nb to HCI device events (register/unregister/up/down). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove @nb from the HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast @event for @hdev to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
/* Called from the event path when command @cmd finished with @result;
 * completes a pending synchronous request and wakes its waiter. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
112
113static void hci_req_cancel(struct hci_dev *hdev, int err)
114{
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122}
123
124/* Execute request and wait for completion. */
8e87d142 125static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
1da177e4
LT
126 unsigned long opt, __u32 timeout)
127{
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
3ff50b79 158 }
1da177e4 159
a5040efa 160 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165}
166
167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168 unsigned long opt, __u32 timeout)
169{
170 int ret;
171
7c6a329e
MH
172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
1da177e4
LT
175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181}
182
/* Request callback: issue a controller HCI_Reset. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
190
/* Request callback: full controller bring-up command sequence.
 * Order matters: driver-queued commands first, then reset, then the
 * mandatory reads, then optional setup. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands pre-queued by the transport driver go out first */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot survive it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Purge every link key stored in the controller; the host keeps them */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
265
/* Request callback: LE-specific bring-up (only sent on LE-capable adapters). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
273
1da177e4
LT
/* Request callback: set inquiry/page scan enable bits (opt = scan mode). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
283
/* Request callback: enable/disable authentication (opt = auth mode). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
293
/* Request callback: enable/disable link-level encryption (opt = mode). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
303
e4e8e37c
MH
/* Request callback: set the default link policy (opt = policy bitmask). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
313
8e87d142 314/* Get HCI device by index.
1da177e4
LT
315 * Device is held on return. */
316struct hci_dev *hci_dev_get(int index)
317{
318 struct hci_dev *hdev = NULL;
319 struct list_head *p;
320
321 BT_DBG("%d", index);
322
323 if (index < 0)
324 return NULL;
325
326 read_lock(&hci_dev_list_lock);
327 list_for_each(p, &hci_dev_list) {
328 struct hci_dev *d = list_entry(p, struct hci_dev, list);
329 if (d->id == index) {
330 hdev = hci_dev_hold(d);
331 break;
332 }
333 }
334 read_unlock(&hci_dev_list_lock);
335 return hdev;
336}
1da177e4
LT
337
338/* ---- Inquiry support ---- */
339static void inquiry_cache_flush(struct hci_dev *hdev)
340{
341 struct inquiry_cache *cache = &hdev->inq_cache;
342 struct inquiry_entry *next = cache->list, *e;
343
344 BT_DBG("cache %p", cache);
345
346 cache->list = NULL;
347 while ((e = next)) {
348 next = e->next;
349 kfree(e);
350 }
351}
352
353struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
354{
355 struct inquiry_cache *cache = &hdev->inq_cache;
356 struct inquiry_entry *e;
357
358 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
359
360 for (e = cache->list; e; e = e->next)
361 if (!bacmp(&e->data.bdaddr, bdaddr))
362 break;
363 return e;
364}
365
/* Insert or refresh an inquiry result in the cache.
 * Caller must hold the device lock; allocation is GFP_ATOMIC and a
 * failed allocation silently drops the result. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Prepend to the singly-linked cache list */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
388
389static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
392 struct inquiry_info *info = (struct inquiry_info *) buf;
393 struct inquiry_entry *e;
394 int copied = 0;
395
396 for (e = cache->list; e && copied < num; e = e->next, copied++) {
397 struct inquiry_data *data = &e->data;
398 bacpy(&info->bdaddr, &data->bdaddr);
399 info->pscan_rep_mode = data->pscan_rep_mode;
400 info->pscan_period_mode = data->pscan_period_mode;
401 info->pscan_mode = data->pscan_mode;
402 memcpy(info->dev_class, data->dev_class, 3);
403 info->clock_offset = data->clock_offset;
404 info++;
405 }
406
407 BT_DBG("cache %p, copied %d", cache, copied);
408 return copied;
409}
410
411static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
412{
413 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
414 struct hci_cp_inquiry cp;
415
416 BT_DBG("%s", hdev->name);
417
418 if (test_bit(HCI_INQUIRY, &hdev->flags))
419 return;
420
421 /* Start Inquiry */
422 memcpy(&cp.lap, &ir->lap, 3);
423 cp.length = ir->length;
424 cp.num_rsp = ir->num_rsp;
a9de9248 425 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
426}
427
/* HCIINQUIRY ioctl: run (or reuse a cached) inquiry and copy the
 * results back to userspace. @arg points to a struct hci_inquiry_req
 * followed by room for the responses. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Re-inquire only if the cache is stale, empty, or a flush was asked */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit approximates that */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the responses */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
493
494/* ---- HCI ioctl helpers ---- */
495
/* ---- HCI ioctl helpers ---- */

/* Bring HCI device @dev up: open the transport, run the init request
 * sequence (unless HCI_RAW), and mark the device HCI_UP.
 * Returns 0 on success or a negative errno; on init failure the
 * transport is closed again and all queues are purged. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a radio that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable controllers need an extra init round */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on while still in initial setup */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
579
/* Take @hdev down: cancel pending requests, stop tasklets, flush
 * caches and queues, reset the controller (unless HCI_RAW) and close
 * the transport. Safe to call on an already-down device (returns 0). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
644
645int hci_dev_close(__u16 dev)
646{
647 struct hci_dev *hdev;
648 int err;
649
70f23020
AE
650 hdev = hci_dev_get(dev);
651 if (!hdev)
1da177e4
LT
652 return -ENODEV;
653 err = hci_dev_do_close(hdev);
654 hci_dev_put(hdev);
655 return err;
656}
657
/* HCIDEVRESET ioctl: flush queues/caches and issue an HCI_Reset to @dev
 * without taking it down. No-op success if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiet while we purge its queues */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control counters to their post-reset state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
698
699int hci_dev_reset_stat(__u16 dev)
700{
701 struct hci_dev *hdev;
702 int ret = 0;
703
70f23020
AE
704 hdev = hci_dev_get(dev);
705 if (!hdev)
1da177e4
LT
706 return -ENODEV;
707
708 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
709
710 hci_dev_put(hdev);
711
712 return ret;
713}
714
/* Dispatch the HCISET* ioctls: run the matching request against the
 * controller or update host-side settings directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side only: no controller round-trip needed */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs: high half = MTU, low half = packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
789
/* HCIGETDEVLIST ioctl: fill a struct hci_dev_list_req with the id and
 * flags of up to dev_num registered devices. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Userspace touched the device: cancel the pending auto-off */
		hci_del_off_timer(hdev);

		/* Legacy (non-mgmt) userspace gets pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
839
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one device. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel the pending auto-off */
	hci_del_off_timer(hdev);

	/* Legacy (non-mgmt) userspace gets pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus in the low nibble and device type in the high nibble */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
880
881/* ---- Interface to HCI drivers ---- */
882
611b30f7
MH
883static int hci_rfkill_set_block(void *data, bool blocked)
884{
885 struct hci_dev *hdev = data;
886
887 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
888
889 if (!blocked)
890 return 0;
891
892 hci_dev_do_close(hdev);
893
894 return 0;
895}
896
897static const struct rfkill_ops hci_rfkill_ops = {
898 .set_block = hci_rfkill_set_block,
899};
900
1da177e4
LT
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Queue for driver-supplied init commands (see hci_init_req()) */
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
915
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
925
/* Workqueue handler: power the device on after registration and arm the
 * auto-off timer so an unclaimed adapter is switched off again. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on ends the setup phase */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
942
/* Workqueue handler: take the device down (scheduled by hci_auto_off()). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
951
/* Timer callback: auto-off expired in atomic context, so defer the
 * actual power-off to the workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
962
/* Cancel a pending auto-off: the device is being used by someone. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
970
2aeb9a1a
JH
971int hci_uuids_clear(struct hci_dev *hdev)
972{
973 struct list_head *p, *n;
974
975 list_for_each_safe(p, n, &hdev->uuids) {
976 struct bt_uuid *uuid;
977
978 uuid = list_entry(p, struct bt_uuid, list);
979
980 list_del(p);
981 kfree(uuid);
982 }
983
984 return 0;
985}
986
55ed8ca1
JH
987int hci_link_keys_clear(struct hci_dev *hdev)
988{
989 struct list_head *p, *n;
990
991 list_for_each_safe(p, n, &hdev->link_keys) {
992 struct link_key *key;
993
994 key = list_entry(p, struct link_key, list);
995
996 list_del(p);
997 kfree(key);
998 }
999
1000 return 0;
1001}
1002
1003struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004{
1005 struct list_head *p;
1006
1007 list_for_each(p, &hdev->link_keys) {
1008 struct link_key *k;
1009
1010 k = list_entry(p, struct link_key, list);
1011
1012 if (bacmp(bdaddr, &k->bdaddr) == 0)
1013 return k;
1014 }
1015
1016 return NULL;
1017}
1018
/* Store (or refresh) a link key for @bdaddr; notifies mgmt when
 * @new_key is set. Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the mgmt notification */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* 0x06 = Changed Combination Key: keep the previous key's type */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1052
1053int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1054{
1055 struct link_key *key;
1056
1057 key = hci_find_link_key(hdev, bdaddr);
1058 if (!key)
1059 return -ENOENT;
1060
1061 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1062
1063 list_del(&key->list);
1064 kfree(key);
1065
1066 return 0;
1067}
1068
/* Register HCI device */
/* Assigns the lowest free hciN id, initializes all per-device state,
 * registers sysfs/rfkill, and schedules the initial auto power-on.
 * Returns the assigned id, or -EINVAL/-ENOMEM on failure. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The transport driver must provide these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id (list is kept sorted by id) */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill failures are non-fatal: the device just loses soft-block */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on now; hci_auto_off() turns it off again if unclaimed */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1172
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up, closing the device
 * first; drops the registration reference at the end. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices mgmt ever saw as present */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop the auto-off timer before destroying its workqueue */
	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1217
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Just broadcast the event; interested parties react via the
	 * HCI notifier chain. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1225
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Counterpart of hci_suspend_dev: only broadcasts the event. */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1233
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	/* The owning device is carried in skb->dev by convention. */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* Only accept frames while the device is up or still
	 * initializing; otherwise drop them. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1257
/* Incrementally reassemble one HCI packet of @type from a driver byte
 * stream into hdev->reassembly[index].
 *
 * Returns the number of input bytes left unconsumed (>= 0) or a
 * negative error (-EILSEQ on bad type/index, -ENOMEM on allocation
 * failure or oversized payload).  A completed frame is handed off to
 * hci_recv_frame(). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: size the buffer for the worst
		 * case of this packet type and expect the fixed header
		 * bytes first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed for
		 * the current stage (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Never copy more than the current stage still expects. */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed header is complete, read the payload
		 * length from it and sanity-check against tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1366
ef222013
MH
1367int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1368{
f39a3c06
SS
1369 int rem = 0;
1370
ef222013
MH
1371 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1372 return -EILSEQ;
1373
da5f6c37 1374 while (count) {
f39a3c06
SS
1375 rem = hci_reassembly(hdev, type, data, count,
1376 type - 1, GFP_ATOMIC);
1377 if (rem < 0)
1378 return rem;
ef222013 1379
f39a3c06
SS
1380 data += (count - rem);
1381 count = rem;
da5f6c37 1382 };
ef222013 1383
f39a3c06 1384 return rem;
ef222013
MH
1385}
1386EXPORT_SYMBOL(hci_recv_fragment);
1387
99811510
SS
1388#define STREAM_REASSEMBLY 0
1389
1390int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1391{
1392 int type;
1393 int rem = 0;
1394
da5f6c37 1395 while (count) {
99811510
SS
1396 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1397
1398 if (!skb) {
1399 struct { char type; } *pkt;
1400
1401 /* Start of the frame */
1402 pkt = data;
1403 type = pkt->type;
1404
1405 data++;
1406 count--;
1407 } else
1408 type = bt_cb(skb)->pkt_type;
1409
1410 rem = hci_reassembly(hdev, type, data,
1411 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1412 if (rem < 0)
1413 return rem;
1414
1415 data += (count - rem);
1416 count = rem;
da5f6c37 1417 };
99811510
SS
1418
1419 return rem;
1420}
1421EXPORT_SYMBOL(hci_recv_stream_fragment);
1422
1da177e4
LT
1423/* ---- Interface to upper protocols ---- */
1424
1425/* Register/Unregister protocols.
1426 * hci_task_lock is used to ensure that no tasks are running. */
1427int hci_register_proto(struct hci_proto *hp)
1428{
1429 int err = 0;
1430
1431 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1432
1433 if (hp->id >= HCI_MAX_PROTO)
1434 return -EINVAL;
1435
1436 write_lock_bh(&hci_task_lock);
1437
1438 if (!hci_proto[hp->id])
1439 hci_proto[hp->id] = hp;
1440 else
1441 err = -EEXIST;
1442
1443 write_unlock_bh(&hci_task_lock);
1444
1445 return err;
1446}
1447EXPORT_SYMBOL(hci_register_proto);
1448
1449int hci_unregister_proto(struct hci_proto *hp)
1450{
1451 int err = 0;
1452
1453 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1454
1455 if (hp->id >= HCI_MAX_PROTO)
1456 return -EINVAL;
1457
1458 write_lock_bh(&hci_task_lock);
1459
1460 if (hci_proto[hp->id])
1461 hci_proto[hp->id] = NULL;
1462 else
1463 err = -ENOENT;
1464
1465 write_unlock_bh(&hci_task_lock);
1466
1467 return err;
1468}
1469EXPORT_SYMBOL(hci_unregister_proto);
1470
/* Register an upper-layer callback block on the global hci_cb_list. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1482
/* Remove a callback block from the global hci_cb_list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1494
/* Hand one outgoing frame to the driver; the owning device travels in
 * skb->dev (set by the queueing paths). */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to listening HCI sockets. */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1518
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header followed by the parameter block. */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Record the opcode of the last command issued while still
	 * initializing — presumably consumed by the init sequence. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Queue the command and let the cmd tasklet do the send. */
	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
LT
1554
1555/* Get data from the previously sent command */
a9de9248 1556void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1557{
1558 struct hci_command_hdr *hdr;
1559
1560 if (!hdev->sent_cmd)
1561 return NULL;
1562
1563 hdr = (void *) hdev->sent_cmd->data;
1564
a9de9248 1565 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1566 return NULL;
1567
a9de9248 1568 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1569
1570 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1571}
1572
/* Send ACL data */
/* Prepend the ACL header (handle+flags, payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet-boundary/broadcast flags are
	 * packed into a single 16-bit little-endian field. */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1585
9a9c6a34 1586void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1da177e4
LT
1587{
1588 struct hci_dev *hdev = conn->hdev;
1589 struct sk_buff *list;
1590
1591 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1592
1593 skb->dev = (void *) hdev;
0d48d939 1594 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 1595 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4 1596
70f23020
AE
1597 list = skb_shinfo(skb)->frag_list;
1598 if (!list) {
1da177e4
LT
1599 /* Non fragmented */
1600 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1601
1602 skb_queue_tail(&conn->data_q, skb);
1603 } else {
1604 /* Fragmented */
1605 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1606
1607 skb_shinfo(skb)->frag_list = NULL;
1608
1609 /* Queue all fragments atomically */
1610 spin_lock_bh(&conn->data_q.lock);
1611
1612 __skb_queue_tail(&conn->data_q, skb);
e702112f
AE
1613
1614 flags &= ~ACL_START;
1615 flags |= ACL_CONT;
1da177e4
LT
1616 do {
1617 skb = list; list = list->next;
8e87d142 1618
1da177e4 1619 skb->dev = (void *) hdev;
0d48d939 1620 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 1621 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
1622
1623 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1624
1625 __skb_queue_tail(&conn->data_q, skb);
1626 } while (list);
1627
1628 spin_unlock_bh(&conn->data_q.lock);
1629 }
1630
c78ae283 1631 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
1632}
1633EXPORT_SYMBOL(hci_send_acl);
1634
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the SCO header in front of the payload. */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue the frame and let the TX tasklet do the actual send. */
	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1657
1658/* ---- HCI TX task (outgoing data) ---- */
1659
/* HCI Connection scheduler */
/* Select the ready connection of @type with the fewest packets in
 * flight and compute a fair per-round quota from the free controller
 * buffer count for that link type. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Skip wrong-type or idle connections ... */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* ... and those not in a data-bearing state. */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-served connection (fewest in flight). */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type; LE falls
		 * back to the ACL pool when the controller advertises no
		 * dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the available buffers evenly; at least one. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1715
bae1f5d9 1716static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
1717{
1718 struct hci_conn_hash *h = &hdev->conn_hash;
1719 struct list_head *p;
1720 struct hci_conn *c;
1721
bae1f5d9 1722 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
1723
1724 /* Kill stalled connections */
1725 list_for_each(p, &h->list) {
1726 c = list_entry(p, struct hci_conn, list);
bae1f5d9
VT
1727 if (c->type == type && c->sent) {
1728 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
1729 hdev->name, batostr(&c->dst));
1730 hci_acl_disconn(c, 0x13);
1731 }
1732 }
1733}
1734
/* Drain ACL queues, round-robin over ready connections while the
 * controller still has free ACL buffers. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* presumably wakes the link from a power-save
			 * mode before sending — see
			 * hci_conn_enter_active_mode */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per frame. */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1764
1765/* Schedule SCO */
1766static inline void hci_sched_sco(struct hci_dev *hdev)
1767{
1768 struct hci_conn *conn;
1769 struct sk_buff *skb;
1770 int quote;
1771
1772 BT_DBG("%s", hdev->name);
1773
1774 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1775 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1776 BT_DBG("skb %p len %d", skb, skb->len);
1777 hci_send_frame(skb);
1778
1779 conn->sent++;
1780 if (conn->sent == ~0)
1781 conn->sent = 0;
1782 }
1783 }
1784}
1785
b6a0dc82
MH
1786static inline void hci_sched_esco(struct hci_dev *hdev)
1787{
1788 struct hci_conn *conn;
1789 struct sk_buff *skb;
1790 int quote;
1791
1792 BT_DBG("%s", hdev->name);
1793
1794 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1795 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1796 BT_DBG("skb %p len %d", skb, skb->len);
1797 hci_send_frame(skb);
1798
1799 conn->sent++;
1800 if (conn->sent == ~0)
1801 conn->sent = 0;
1802 }
1803 }
1804}
1805
/* Drain LE queues.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL pool instead. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write the remaining budget back to whichever pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1839
/* TX tasklet: runs all per-link-type schedulers, then flushes raw
 * packets.  Holding hci_task_lock for read keeps protocol
 * (un)registration out while the tasklet runs (see
 * hci_register_proto). */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1866
 1867 /* ----- HCI RX task (incoming data processing) ----- */
1868
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field packs the connection handle and the
	 * packet-boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* skb ownership passes to the protocol here. */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* No consumer found: drop the frame. */
	kfree_skb(skb);
}
1908
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* skb ownership passes to the protocol here. */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* No consumer found: drop the frame. */
	kfree_skb(skb);
}
1944
6516455d 1945static void hci_rx_task(unsigned long arg)
1da177e4
LT
1946{
1947 struct hci_dev *hdev = (struct hci_dev *) arg;
1948 struct sk_buff *skb;
1949
1950 BT_DBG("%s", hdev->name);
1951
1952 read_lock(&hci_task_lock);
1953
1954 while ((skb = skb_dequeue(&hdev->rx_q))) {
1955 if (atomic_read(&hdev->promisc)) {
1956 /* Send copy to the sockets */
eec8d2bc 1957 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1958 }
1959
1960 if (test_bit(HCI_RAW, &hdev->flags)) {
1961 kfree_skb(skb);
1962 continue;
1963 }
1964
1965 if (test_bit(HCI_INIT, &hdev->flags)) {
1966 /* Don't process data packets in this states. */
0d48d939 1967 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
1968 case HCI_ACLDATA_PKT:
1969 case HCI_SCODATA_PKT:
1970 kfree_skb(skb);
1971 continue;
3ff50b79 1972 }
1da177e4
LT
1973 }
1974
1975 /* Process frame */
0d48d939 1976 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
1977 case HCI_EVENT_PKT:
1978 hci_event_packet(hdev, skb);
1979 break;
1980
1981 case HCI_ACLDATA_PKT:
1982 BT_DBG("%s ACL data packet", hdev->name);
1983 hci_acldata_packet(hdev, skb);
1984 break;
1985
1986 case HCI_SCODATA_PKT:
1987 BT_DBG("%s SCO data packet", hdev->name);
1988 hci_scodata_packet(hdev, skb);
1989 break;
1990
1991 default:
1992 kfree_skb(skb);
1993 break;
1994 }
1995 }
1996
1997 read_unlock(&hci_task_lock);
1998}
1999
/* CMD tasklet: send one queued command when a command credit is
 * available (cmd_cnt acts as the controller's command-flow budget). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Recover the credit if the controller failed to answer the
	 * last command within a second. */
	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Replace the previous in-flight clone; the clone lets
		 * hci_sent_cmd_data() look the parameters up later. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue and retry next run. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}