Bluetooth: Add read_local_oob_data management command
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

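/*
 * Synchronous request machinery: __hci_request() marks the request as
 * pending, invokes the supplied req() callback (which queues one or
 * more HCI commands) and sleeps on req_wait_q until hci_req_complete()
 * or hci_req_cancel() updates req_status, or the timeout expires.
 *
 * A minimal usage sketch (my_scan_req is hypothetical, shown only to
 * illustrate the calling convention; compare hci_scan_req() below):
 *
 *	static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, my_scan_req, SCAN_PAGE,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */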
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

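/*
 * Controller bring-up: hci_init_req() queues the mandatory and optional
 * initialization commands (reset, feature/version/buffer-size reads,
 * BD_ADDR, event filters, timeouts). It runs under HCI_INIT via
 * __hci_request() from hci_dev_open(); hci_send_cmd() records each
 * opcode in init_last_cmd so hci_req_complete() only finishes the
 * request once the final init command completes.
 */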
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

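/*
 * HCIINQUIRY ioctl handler: flushes the inquiry cache when it is stale,
 * empty or a flush was explicitly requested, runs a fresh inquiry via
 * hci_request() if needed, then copies the cached responses back to
 * user space.
 */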
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

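/*
 * Typical driver-side lifecycle, as a sketch only (error handling
 * omitted; my_open, my_close, my_send and my_destruct stand for
 * hypothetical driver callbacks). hci_register_dev() rejects a device
 * that lacks open/close/destruct with -EINVAL.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	id = hci_register_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */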
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

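/*
 * Auto-power handling: hci_register_dev() queues hci_power_on(), which
 * opens the device and, while HCI_AUTO_OFF is set, arms off_timer for
 * AUTO_OFF_TIMEOUT ms. If nothing clears the flag in time,
 * hci_auto_off() schedules hci_power_off(); any management activity
 * cancels this via hci_del_off_timer().
 */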
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

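/*
 * Stored link keys: a flat list on hdev->link_keys, looked up by
 * bdaddr. hci_add_link_key() updates an existing entry in place and
 * notifies the management interface about genuinely new keys; a changed
 * combination key (type 0x06) keeps the stored type of the key it
 * replaces.
 */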
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

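/*
 * cmd_timer is armed by hci_cmd_task() each time a command is handed to
 * the driver. If the controller never answers, this handler restores
 * one command credit so the command queue can drain again.
 */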
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

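/*
 * Packet reassembly for drivers that deliver data in arbitrary chunks.
 * One sk_buff per reassembly slot is grown until scb->expect reaches
 * zero, at which point the complete frame is fed to hci_recv_frame().
 * The return value is the number of input bytes not yet consumed.
 *
 * A driver would typically call (sketch, assuming an event chunk):
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 */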
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

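/*
 * Commands are not sent to the driver directly: hci_send_cmd() builds
 * the packet and queues it on cmd_q; hci_cmd_task() then forwards it,
 * one command per available credit (cmd_cnt).
 */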
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

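/*
 * hci_low_sent() above implements the fairness policy: among
 * connections of the requested type with queued data it picks the one
 * with the fewest packets in flight and grants it a quote of cnt / num
 * controller credits (at least one). When credits stop returning for
 * too long, the schedulers below call hci_link_tx_to() to kill the
 * stalled links.
 */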
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

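/*
 * Command scheduler: consumes one credit from cmd_cnt per command sent.
 * A clone of the outgoing command is kept in hdev->sent_cmd so that
 * hci_sent_cmd_data() can retrieve its parameters when the completion
 * event arrives, and cmd_timer is (re)armed to catch a silent
 * controller.
 */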
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}