Bluetooth: Add HCI Read Flow Control Mode function
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
7784d78f
AE
57int enable_hs;
58
1da177e4
LT
59static void hci_cmd_task(unsigned long arg);
60static void hci_rx_task(unsigned long arg);
61static void hci_tx_task(unsigned long arg);
1da177e4
LT
62
63static DEFINE_RWLOCK(hci_task_lock);
64
65/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
73/* HCI protocols */
74#define HCI_MAX_PROTO 2
75struct hci_proto *hci_proto[HCI_MAX_PROTO];
76
77/* HCI notifiers list */
e041c683 78static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
79
80/* ---- HCI notifications ---- */
81
82int hci_register_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
85}
86
87int hci_unregister_notifier(struct notifier_block *nb)
88{
e041c683 89 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
90}
91
6516455d 92static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 93{
e041c683 94 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
95}
96
97/* ---- HCI requests ---- */
98
23bb5763 99void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 100{
23bb5763
JH
101 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
102
a5040efa
JH
103 /* If this is the init phase check if the completed command matches
104 * the last init command, and if not just return.
105 */
106 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 107 return;
1da177e4
LT
108
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = result;
111 hdev->req_status = HCI_REQ_DONE;
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114}
115
116static void hci_req_cancel(struct hci_dev *hdev, int err)
117{
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125}
126
127/* Execute request and wait for completion. */
8e87d142 128static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 129 unsigned long opt, __u32 timeout)
1da177e4
LT
130{
131 DECLARE_WAITQUEUE(wait, current);
132 int err = 0;
133
134 BT_DBG("%s start", hdev->name);
135
136 hdev->req_status = HCI_REQ_PEND;
137
138 add_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_INTERRUPTIBLE);
140
141 req(hdev, opt);
142 schedule_timeout(timeout);
143
144 remove_wait_queue(&hdev->req_wait_q, &wait);
145
146 if (signal_pending(current))
147 return -EINTR;
148
149 switch (hdev->req_status) {
150 case HCI_REQ_DONE:
e175072f 151 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
152 break;
153
154 case HCI_REQ_CANCELED:
155 err = -hdev->req_result;
156 break;
157
158 default:
159 err = -ETIMEDOUT;
160 break;
3ff50b79 161 }
1da177e4 162
a5040efa 163 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 return err;
168}
169
170static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 171 unsigned long opt, __u32 timeout)
1da177e4
LT
172{
173 int ret;
174
7c6a329e
MH
175 if (!test_bit(HCI_UP, &hdev->flags))
176 return -ENETDOWN;
177
1da177e4
LT
178 /* Serialize all requests */
179 hci_req_lock(hdev);
180 ret = __hci_request(hdev, req, opt, timeout);
181 hci_req_unlock(hdev);
182
183 return ret;
184}
185
186static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
187{
188 BT_DBG("%s %ld", hdev->name, opt);
189
190 /* Reset device */
f630cf0d 191 set_bit(HCI_RESET, &hdev->flags);
a9de9248 192 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
193}
194
195static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
196{
b0916ea0 197 struct hci_cp_delete_stored_link_key cp;
1da177e4 198 struct sk_buff *skb;
1ebb9252 199 __le16 param;
89f2783d 200 __u8 flt_type;
1da177e4
LT
201
202 BT_DBG("%s %ld", hdev->name, opt);
203
204 /* Driver initialization */
205
206 /* Special commands */
207 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 208 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 209 skb->dev = (void *) hdev;
c78ae283 210
1da177e4 211 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 212 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
213 }
214 skb_queue_purge(&hdev->driver_init);
215
216 /* Mandatory initialization */
217
218 /* Reset */
f630cf0d
GP
219 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
220 set_bit(HCI_RESET, &hdev->flags);
a9de9248 221 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 222 }
1da177e4
LT
223
224 /* Read Local Supported Features */
a9de9248 225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 226
1143e5a6 227 /* Read Local Version */
a9de9248 228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 229
1da177e4 230 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 231 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4
LT
232
233#if 0
234 /* Host buffer size */
235 {
236 struct hci_cp_host_buffer_size cp;
aca3192c 237 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
1da177e4 238 cp.sco_mtu = HCI_MAX_SCO_SIZE;
aca3192c
YH
239 cp.acl_max_pkt = cpu_to_le16(0xffff);
240 cp.sco_max_pkt = cpu_to_le16(0xffff);
a9de9248 241 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
1da177e4
LT
242 }
243#endif
244
245 /* Read BD Address */
a9de9248
MH
246 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
247
248 /* Read Class of Device */
249 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
250
251 /* Read Local Name */
252 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
253
254 /* Read Voice Setting */
a9de9248 255 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
256
257 /* Optional initialization */
258
259 /* Clear Event Filters */
89f2783d 260 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 261 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 262
1da177e4 263 /* Connection accept timeout ~20 secs */
aca3192c 264 param = cpu_to_le16(0x7d00);
a9de9248 265 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
266
267 bacpy(&cp.bdaddr, BDADDR_ANY);
268 cp.delete_all = 1;
269 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
270}
271
6ed58ec5
VT
272static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
273{
274 BT_DBG("%s", hdev->name);
275
276 /* Read LE buffer size */
277 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
278}
279
1da177e4
LT
280static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
281{
282 __u8 scan = opt;
283
284 BT_DBG("%s %x", hdev->name, scan);
285
286 /* Inquiry and Page scans */
a9de9248 287 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
288}
289
290static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
291{
292 __u8 auth = opt;
293
294 BT_DBG("%s %x", hdev->name, auth);
295
296 /* Authentication */
a9de9248 297 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
298}
299
300static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
301{
302 __u8 encrypt = opt;
303
304 BT_DBG("%s %x", hdev->name, encrypt);
305
e4e8e37c 306 /* Encryption */
a9de9248 307 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
308}
309
e4e8e37c
MH
310static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __le16 policy = cpu_to_le16(opt);
313
a418b893 314 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
315
316 /* Default link policy */
317 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
318}
319
8e87d142 320/* Get HCI device by index.
1da177e4
LT
321 * Device is held on return. */
322struct hci_dev *hci_dev_get(int index)
323{
8035ded4 324 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
325
326 BT_DBG("%d", index);
327
328 if (index < 0)
329 return NULL;
330
331 read_lock(&hci_dev_list_lock);
8035ded4 332 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
333 if (d->id == index) {
334 hdev = hci_dev_hold(d);
335 break;
336 }
337 }
338 read_unlock(&hci_dev_list_lock);
339 return hdev;
340}
1da177e4
LT
341
342/* ---- Inquiry support ---- */
343static void inquiry_cache_flush(struct hci_dev *hdev)
344{
345 struct inquiry_cache *cache = &hdev->inq_cache;
346 struct inquiry_entry *next = cache->list, *e;
347
348 BT_DBG("cache %p", cache);
349
350 cache->list = NULL;
351 while ((e = next)) {
352 next = e->next;
353 kfree(e);
354 }
355}
356
357struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
358{
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *e;
361
362 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
363
364 for (e = cache->list; e; e = e->next)
365 if (!bacmp(&e->data.bdaddr, bdaddr))
366 break;
367 return e;
368}
369
370void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
371{
372 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 373 struct inquiry_entry *ie;
1da177e4
LT
374
375 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
376
70f23020
AE
377 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
378 if (!ie) {
1da177e4 379 /* Entry not in the cache. Add new one. */
70f23020
AE
380 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
381 if (!ie)
1da177e4 382 return;
70f23020
AE
383
384 ie->next = cache->list;
385 cache->list = ie;
1da177e4
LT
386 }
387
70f23020
AE
388 memcpy(&ie->data, data, sizeof(*data));
389 ie->timestamp = jiffies;
1da177e4
LT
390 cache->timestamp = jiffies;
391}
392
393static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
394{
395 struct inquiry_cache *cache = &hdev->inq_cache;
396 struct inquiry_info *info = (struct inquiry_info *) buf;
397 struct inquiry_entry *e;
398 int copied = 0;
399
400 for (e = cache->list; e && copied < num; e = e->next, copied++) {
401 struct inquiry_data *data = &e->data;
402 bacpy(&info->bdaddr, &data->bdaddr);
403 info->pscan_rep_mode = data->pscan_rep_mode;
404 info->pscan_period_mode = data->pscan_period_mode;
405 info->pscan_mode = data->pscan_mode;
406 memcpy(info->dev_class, data->dev_class, 3);
407 info->clock_offset = data->clock_offset;
408 info++;
409 }
410
411 BT_DBG("cache %p, copied %d", cache, copied);
412 return copied;
413}
414
415static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
416{
417 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
418 struct hci_cp_inquiry cp;
419
420 BT_DBG("%s", hdev->name);
421
422 if (test_bit(HCI_INQUIRY, &hdev->flags))
423 return;
424
425 /* Start Inquiry */
426 memcpy(&cp.lap, &ir->lap, 3);
427 cp.length = ir->length;
428 cp.num_rsp = ir->num_rsp;
a9de9248 429 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
430}
431
432int hci_inquiry(void __user *arg)
433{
434 __u8 __user *ptr = arg;
435 struct hci_inquiry_req ir;
436 struct hci_dev *hdev;
437 int err = 0, do_inquiry = 0, max_rsp;
438 long timeo;
439 __u8 *buf;
440
441 if (copy_from_user(&ir, ptr, sizeof(ir)))
442 return -EFAULT;
443
5a08ecce
AE
444 hdev = hci_dev_get(ir.dev_id);
445 if (!hdev)
1da177e4
LT
446 return -ENODEV;
447
448 hci_dev_lock_bh(hdev);
8e87d142 449 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
450 inquiry_cache_empty(hdev) ||
451 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
452 inquiry_cache_flush(hdev);
453 do_inquiry = 1;
454 }
455 hci_dev_unlock_bh(hdev);
456
04837f64 457 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
458
459 if (do_inquiry) {
460 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
461 if (err < 0)
462 goto done;
463 }
1da177e4
LT
464
465 /* for unlimited number of responses we will use buffer with 255 entries */
466 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
467
468 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
469 * copy it to the user space.
470 */
01df8c31 471 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 472 if (!buf) {
1da177e4
LT
473 err = -ENOMEM;
474 goto done;
475 }
476
477 hci_dev_lock_bh(hdev);
478 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
479 hci_dev_unlock_bh(hdev);
480
481 BT_DBG("num_rsp %d", ir.num_rsp);
482
483 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
484 ptr += sizeof(ir);
485 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
486 ir.num_rsp))
487 err = -EFAULT;
8e87d142 488 } else
1da177e4
LT
489 err = -EFAULT;
490
491 kfree(buf);
492
493done:
494 hci_dev_put(hdev);
495 return err;
496}
497
498/* ---- HCI ioctl helpers ---- */
499
500int hci_dev_open(__u16 dev)
501{
502 struct hci_dev *hdev;
503 int ret = 0;
504
5a08ecce
AE
505 hdev = hci_dev_get(dev);
506 if (!hdev)
1da177e4
LT
507 return -ENODEV;
508
509 BT_DBG("%s %p", hdev->name, hdev);
510
511 hci_req_lock(hdev);
512
611b30f7
MH
513 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
514 ret = -ERFKILL;
515 goto done;
516 }
517
1da177e4
LT
518 if (test_bit(HCI_UP, &hdev->flags)) {
519 ret = -EALREADY;
520 goto done;
521 }
522
523 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
524 set_bit(HCI_RAW, &hdev->flags);
525
07e3b94a
AE
526 /* Treat all non BR/EDR controllers as raw devices if
527 enable_hs is not set */
528 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
529 set_bit(HCI_RAW, &hdev->flags);
530
1da177e4
LT
531 if (hdev->open(hdev)) {
532 ret = -EIO;
533 goto done;
534 }
535
536 if (!test_bit(HCI_RAW, &hdev->flags)) {
537 atomic_set(&hdev->cmd_cnt, 1);
538 set_bit(HCI_INIT, &hdev->flags);
a5040efa 539 hdev->init_last_cmd = 0;
1da177e4 540
04837f64
MH
541 ret = __hci_request(hdev, hci_init_req, 0,
542 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 543
eead27da 544 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
545 ret = __hci_request(hdev, hci_le_init_req, 0,
546 msecs_to_jiffies(HCI_INIT_TIMEOUT));
547
1da177e4
LT
548 clear_bit(HCI_INIT, &hdev->flags);
549 }
550
551 if (!ret) {
552 hci_dev_hold(hdev);
553 set_bit(HCI_UP, &hdev->flags);
554 hci_notify(hdev, HCI_DEV_UP);
56e5cb86
JH
555 if (!test_bit(HCI_SETUP, &hdev->flags)) {
556 hci_dev_lock_bh(hdev);
744cf19e 557 mgmt_powered(hdev, 1);
56e5cb86
JH
558 hci_dev_unlock_bh(hdev);
559 }
8e87d142 560 } else {
1da177e4
LT
561 /* Init failed, cleanup */
562 tasklet_kill(&hdev->rx_task);
563 tasklet_kill(&hdev->tx_task);
564 tasklet_kill(&hdev->cmd_task);
565
566 skb_queue_purge(&hdev->cmd_q);
567 skb_queue_purge(&hdev->rx_q);
568
569 if (hdev->flush)
570 hdev->flush(hdev);
571
572 if (hdev->sent_cmd) {
573 kfree_skb(hdev->sent_cmd);
574 hdev->sent_cmd = NULL;
575 }
576
577 hdev->close(hdev);
578 hdev->flags = 0;
579 }
580
581done:
582 hci_req_unlock(hdev);
583 hci_dev_put(hdev);
584 return ret;
585}
586
587static int hci_dev_do_close(struct hci_dev *hdev)
588{
589 BT_DBG("%s %p", hdev->name, hdev);
590
591 hci_req_cancel(hdev, ENODEV);
592 hci_req_lock(hdev);
593
594 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 595 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
596 hci_req_unlock(hdev);
597 return 0;
598 }
599
600 /* Kill RX and TX tasks */
601 tasklet_kill(&hdev->rx_task);
602 tasklet_kill(&hdev->tx_task);
603
16ab91ab 604 if (hdev->discov_timeout > 0) {
e0f9309f 605 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
606 hdev->discov_timeout = 0;
607 }
608
3243553f 609 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 610 cancel_delayed_work(&hdev->power_off);
3243553f 611
1da177e4
LT
612 hci_dev_lock_bh(hdev);
613 inquiry_cache_flush(hdev);
614 hci_conn_hash_flush(hdev);
615 hci_dev_unlock_bh(hdev);
616
617 hci_notify(hdev, HCI_DEV_DOWN);
618
619 if (hdev->flush)
620 hdev->flush(hdev);
621
622 /* Reset device */
623 skb_queue_purge(&hdev->cmd_q);
624 atomic_set(&hdev->cmd_cnt, 1);
625 if (!test_bit(HCI_RAW, &hdev->flags)) {
626 set_bit(HCI_INIT, &hdev->flags);
04837f64 627 __hci_request(hdev, hci_reset_req, 0,
43611a7b 628 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
629 clear_bit(HCI_INIT, &hdev->flags);
630 }
631
632 /* Kill cmd task */
633 tasklet_kill(&hdev->cmd_task);
634
635 /* Drop queues */
636 skb_queue_purge(&hdev->rx_q);
637 skb_queue_purge(&hdev->cmd_q);
638 skb_queue_purge(&hdev->raw_q);
639
640 /* Drop last sent command */
641 if (hdev->sent_cmd) {
b79f44c1 642 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
643 kfree_skb(hdev->sent_cmd);
644 hdev->sent_cmd = NULL;
645 }
646
647 /* After this point our queues are empty
648 * and no tasks are scheduled. */
649 hdev->close(hdev);
650
56e5cb86 651 hci_dev_lock_bh(hdev);
744cf19e 652 mgmt_powered(hdev, 0);
56e5cb86 653 hci_dev_unlock_bh(hdev);
5add6af8 654
1da177e4
LT
655 /* Clear flags */
656 hdev->flags = 0;
657
658 hci_req_unlock(hdev);
659
660 hci_dev_put(hdev);
661 return 0;
662}
663
664int hci_dev_close(__u16 dev)
665{
666 struct hci_dev *hdev;
667 int err;
668
70f23020
AE
669 hdev = hci_dev_get(dev);
670 if (!hdev)
1da177e4
LT
671 return -ENODEV;
672 err = hci_dev_do_close(hdev);
673 hci_dev_put(hdev);
674 return err;
675}
676
677int hci_dev_reset(__u16 dev)
678{
679 struct hci_dev *hdev;
680 int ret = 0;
681
70f23020
AE
682 hdev = hci_dev_get(dev);
683 if (!hdev)
1da177e4
LT
684 return -ENODEV;
685
686 hci_req_lock(hdev);
687 tasklet_disable(&hdev->tx_task);
688
689 if (!test_bit(HCI_UP, &hdev->flags))
690 goto done;
691
692 /* Drop queues */
693 skb_queue_purge(&hdev->rx_q);
694 skb_queue_purge(&hdev->cmd_q);
695
696 hci_dev_lock_bh(hdev);
697 inquiry_cache_flush(hdev);
698 hci_conn_hash_flush(hdev);
699 hci_dev_unlock_bh(hdev);
700
701 if (hdev->flush)
702 hdev->flush(hdev);
703
8e87d142 704 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 705 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
706
707 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
708 ret = __hci_request(hdev, hci_reset_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
710
711done:
712 tasklet_enable(&hdev->tx_task);
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716}
717
718int hci_dev_reset_stat(__u16 dev)
719{
720 struct hci_dev *hdev;
721 int ret = 0;
722
70f23020
AE
723 hdev = hci_dev_get(dev);
724 if (!hdev)
1da177e4
LT
725 return -ENODEV;
726
727 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
728
729 hci_dev_put(hdev);
730
731 return ret;
732}
733
734int hci_dev_cmd(unsigned int cmd, void __user *arg)
735{
736 struct hci_dev *hdev;
737 struct hci_dev_req dr;
738 int err = 0;
739
740 if (copy_from_user(&dr, arg, sizeof(dr)))
741 return -EFAULT;
742
70f23020
AE
743 hdev = hci_dev_get(dr.dev_id);
744 if (!hdev)
1da177e4
LT
745 return -ENODEV;
746
747 switch (cmd) {
748 case HCISETAUTH:
04837f64
MH
749 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
750 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
751 break;
752
753 case HCISETENCRYPT:
754 if (!lmp_encrypt_capable(hdev)) {
755 err = -EOPNOTSUPP;
756 break;
757 }
758
759 if (!test_bit(HCI_AUTH, &hdev->flags)) {
760 /* Auth must be enabled first */
04837f64
MH
761 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
762 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
763 if (err)
764 break;
765 }
766
04837f64
MH
767 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
768 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
769 break;
770
771 case HCISETSCAN:
04837f64
MH
772 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
773 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
774 break;
775
1da177e4 776 case HCISETLINKPOL:
e4e8e37c
MH
777 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
778 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
779 break;
780
781 case HCISETLINKMODE:
e4e8e37c
MH
782 hdev->link_mode = ((__u16) dr.dev_opt) &
783 (HCI_LM_MASTER | HCI_LM_ACCEPT);
784 break;
785
786 case HCISETPTYPE:
787 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
788 break;
789
790 case HCISETACLMTU:
e4e8e37c
MH
791 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
792 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
793 break;
794
795 case HCISETSCOMTU:
e4e8e37c
MH
796 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
797 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
798 break;
799
800 default:
801 err = -EINVAL;
802 break;
803 }
e4e8e37c 804
1da177e4
LT
805 hci_dev_put(hdev);
806 return err;
807}
808
809int hci_get_dev_list(void __user *arg)
810{
8035ded4 811 struct hci_dev *hdev;
1da177e4
LT
812 struct hci_dev_list_req *dl;
813 struct hci_dev_req *dr;
1da177e4
LT
814 int n = 0, size, err;
815 __u16 dev_num;
816
817 if (get_user(dev_num, (__u16 __user *) arg))
818 return -EFAULT;
819
820 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
821 return -EINVAL;
822
823 size = sizeof(*dl) + dev_num * sizeof(*dr);
824
70f23020
AE
825 dl = kzalloc(size, GFP_KERNEL);
826 if (!dl)
1da177e4
LT
827 return -ENOMEM;
828
829 dr = dl->dev_req;
830
831 read_lock_bh(&hci_dev_list_lock);
8035ded4 832 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 833 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 834 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
835
836 if (!test_bit(HCI_MGMT, &hdev->flags))
837 set_bit(HCI_PAIRABLE, &hdev->flags);
838
1da177e4
LT
839 (dr + n)->dev_id = hdev->id;
840 (dr + n)->dev_opt = hdev->flags;
c542a06c 841
1da177e4
LT
842 if (++n >= dev_num)
843 break;
844 }
845 read_unlock_bh(&hci_dev_list_lock);
846
847 dl->dev_num = n;
848 size = sizeof(*dl) + n * sizeof(*dr);
849
850 err = copy_to_user(arg, dl, size);
851 kfree(dl);
852
853 return err ? -EFAULT : 0;
854}
855
856int hci_get_dev_info(void __user *arg)
857{
858 struct hci_dev *hdev;
859 struct hci_dev_info di;
860 int err = 0;
861
862 if (copy_from_user(&di, arg, sizeof(di)))
863 return -EFAULT;
864
70f23020
AE
865 hdev = hci_dev_get(di.dev_id);
866 if (!hdev)
1da177e4
LT
867 return -ENODEV;
868
3243553f
JH
869 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
870 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 871
c542a06c
JH
872 if (!test_bit(HCI_MGMT, &hdev->flags))
873 set_bit(HCI_PAIRABLE, &hdev->flags);
874
1da177e4
LT
875 strcpy(di.name, hdev->name);
876 di.bdaddr = hdev->bdaddr;
943da25d 877 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
878 di.flags = hdev->flags;
879 di.pkt_type = hdev->pkt_type;
880 di.acl_mtu = hdev->acl_mtu;
881 di.acl_pkts = hdev->acl_pkts;
882 di.sco_mtu = hdev->sco_mtu;
883 di.sco_pkts = hdev->sco_pkts;
884 di.link_policy = hdev->link_policy;
885 di.link_mode = hdev->link_mode;
886
887 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
888 memcpy(&di.features, &hdev->features, sizeof(di.features));
889
890 if (copy_to_user(arg, &di, sizeof(di)))
891 err = -EFAULT;
892
893 hci_dev_put(hdev);
894
895 return err;
896}
897
898/* ---- Interface to HCI drivers ---- */
899
611b30f7
MH
900static int hci_rfkill_set_block(void *data, bool blocked)
901{
902 struct hci_dev *hdev = data;
903
904 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
905
906 if (!blocked)
907 return 0;
908
909 hci_dev_do_close(hdev);
910
911 return 0;
912}
913
914static const struct rfkill_ops hci_rfkill_ops = {
915 .set_block = hci_rfkill_set_block,
916};
917
1da177e4
LT
918/* Alloc HCI device */
919struct hci_dev *hci_alloc_dev(void)
920{
921 struct hci_dev *hdev;
922
25ea6db0 923 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
924 if (!hdev)
925 return NULL;
926
0ac7e700 927 hci_init_sysfs(hdev);
1da177e4
LT
928 skb_queue_head_init(&hdev->driver_init);
929
930 return hdev;
931}
932EXPORT_SYMBOL(hci_alloc_dev);
933
934/* Free HCI device */
935void hci_free_dev(struct hci_dev *hdev)
936{
937 skb_queue_purge(&hdev->driver_init);
938
a91f2e39
MH
939 /* will free via device release */
940 put_device(&hdev->dev);
1da177e4
LT
941}
942EXPORT_SYMBOL(hci_free_dev);
943
ab81cbf9
JH
944static void hci_power_on(struct work_struct *work)
945{
946 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
947
948 BT_DBG("%s", hdev->name);
949
950 if (hci_dev_open(hdev->id) < 0)
951 return;
952
953 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
3243553f
JH
954 queue_delayed_work(hdev->workqueue, &hdev->power_off,
955 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
956
957 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 958 mgmt_index_added(hdev);
ab81cbf9
JH
959}
960
961static void hci_power_off(struct work_struct *work)
962{
3243553f
JH
963 struct hci_dev *hdev = container_of(work, struct hci_dev,
964 power_off.work);
ab81cbf9
JH
965
966 BT_DBG("%s", hdev->name);
967
968 clear_bit(HCI_AUTO_OFF, &hdev->flags);
969
3243553f 970 hci_dev_close(hdev->id);
ab81cbf9
JH
971}
972
16ab91ab
JH
973static void hci_discov_off(struct work_struct *work)
974{
975 struct hci_dev *hdev;
976 u8 scan = SCAN_PAGE;
977
978 hdev = container_of(work, struct hci_dev, discov_off.work);
979
980 BT_DBG("%s", hdev->name);
981
982 hci_dev_lock_bh(hdev);
983
984 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
985
986 hdev->discov_timeout = 0;
987
988 hci_dev_unlock_bh(hdev);
989}
990
2aeb9a1a
JH
991int hci_uuids_clear(struct hci_dev *hdev)
992{
993 struct list_head *p, *n;
994
995 list_for_each_safe(p, n, &hdev->uuids) {
996 struct bt_uuid *uuid;
997
998 uuid = list_entry(p, struct bt_uuid, list);
999
1000 list_del(p);
1001 kfree(uuid);
1002 }
1003
1004 return 0;
1005}
1006
55ed8ca1
JH
1007int hci_link_keys_clear(struct hci_dev *hdev)
1008{
1009 struct list_head *p, *n;
1010
1011 list_for_each_safe(p, n, &hdev->link_keys) {
1012 struct link_key *key;
1013
1014 key = list_entry(p, struct link_key, list);
1015
1016 list_del(p);
1017 kfree(key);
1018 }
1019
1020 return 0;
1021}
1022
1023struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1024{
8035ded4 1025 struct link_key *k;
55ed8ca1 1026
8035ded4 1027 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1028 if (bacmp(bdaddr, &k->bdaddr) == 0)
1029 return k;
55ed8ca1
JH
1030
1031 return NULL;
1032}
1033
d25e28ab
JH
1034static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1035 u8 key_type, u8 old_key_type)
1036{
1037 /* Legacy key */
1038 if (key_type < 0x03)
1039 return 1;
1040
1041 /* Debug keys are insecure so don't store them persistently */
1042 if (key_type == HCI_LK_DEBUG_COMBINATION)
1043 return 0;
1044
1045 /* Changed combination key and there's no previous one */
1046 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1047 return 0;
1048
1049 /* Security mode 3 case */
1050 if (!conn)
1051 return 1;
1052
1053 /* Neither local nor remote side had no-bonding as requirement */
1054 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1055 return 1;
1056
1057 /* Local side had dedicated bonding as requirement */
1058 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1059 return 1;
1060
1061 /* Remote side had dedicated bonding as requirement */
1062 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1063 return 1;
1064
1065 /* If none of the above criteria match, then don't store the key
1066 * persistently */
1067 return 0;
1068}
1069
75d262c2
VCG
1070struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1071{
1072 struct link_key *k;
1073
1074 list_for_each_entry(k, &hdev->link_keys, list) {
1075 struct key_master_id *id;
1076
1077 if (k->type != HCI_LK_SMP_LTK)
1078 continue;
1079
1080 if (k->dlen != sizeof(*id))
1081 continue;
1082
1083 id = (void *) &k->data;
1084 if (id->ediv == ediv &&
1085 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1086 return k;
1087 }
1088
1089 return NULL;
1090}
1091EXPORT_SYMBOL(hci_find_ltk);
1092
1093struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1094 bdaddr_t *bdaddr, u8 type)
1095{
1096 struct link_key *k;
1097
1098 list_for_each_entry(k, &hdev->link_keys, list)
1099 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1100 return k;
1101
1102 return NULL;
1103}
1104EXPORT_SYMBOL(hci_find_link_key_type);
1105
d25e28ab
JH
1106int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1107 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1108{
1109 struct link_key *key, *old_key;
4df378a1 1110 u8 old_key_type, persistent;
55ed8ca1
JH
1111
1112 old_key = hci_find_link_key(hdev, bdaddr);
1113 if (old_key) {
1114 old_key_type = old_key->type;
1115 key = old_key;
1116 } else {
12adcf3a 1117 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1118 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1119 if (!key)
1120 return -ENOMEM;
1121 list_add(&key->list, &hdev->link_keys);
1122 }
1123
1124 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1125
d25e28ab
JH
1126 /* Some buggy controller combinations generate a changed
1127 * combination key for legacy pairing even when there's no
1128 * previous key */
1129 if (type == HCI_LK_CHANGED_COMBINATION &&
1130 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1131 old_key_type == 0xff) {
d25e28ab 1132 type = HCI_LK_COMBINATION;
655fe6ec
JH
1133 if (conn)
1134 conn->key_type = type;
1135 }
d25e28ab 1136
55ed8ca1
JH
1137 bacpy(&key->bdaddr, bdaddr);
1138 memcpy(key->val, val, 16);
55ed8ca1
JH
1139 key->pin_len = pin_len;
1140
b6020ba0 1141 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1142 key->type = old_key_type;
4748fed2
JH
1143 else
1144 key->type = type;
1145
4df378a1
JH
1146 if (!new_key)
1147 return 0;
1148
1149 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1150
744cf19e 1151 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1152
1153 if (!persistent) {
1154 list_del(&key->list);
1155 kfree(key);
1156 }
55ed8ca1
JH
1157
1158 return 0;
1159}
1160
75d262c2 1161int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1162 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1163{
1164 struct link_key *key, *old_key;
1165 struct key_master_id *id;
1166 u8 old_key_type;
1167
1168 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1169
1170 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1171 if (old_key) {
1172 key = old_key;
1173 old_key_type = old_key->type;
1174 } else {
1175 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1176 if (!key)
1177 return -ENOMEM;
1178 list_add(&key->list, &hdev->link_keys);
1179 old_key_type = 0xff;
1180 }
1181
1182 key->dlen = sizeof(*id);
1183
1184 bacpy(&key->bdaddr, bdaddr);
1185 memcpy(key->val, ltk, sizeof(key->val));
1186 key->type = HCI_LK_SMP_LTK;
726b4ffc 1187 key->pin_len = key_size;
75d262c2
VCG
1188
1189 id = (void *) &key->data;
1190 id->ediv = ediv;
1191 memcpy(id->rand, rand, sizeof(id->rand));
1192
1193 if (new_key)
744cf19e 1194 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1195
1196 return 0;
1197}
1198
55ed8ca1
JH
1199int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1200{
1201 struct link_key *key;
1202
1203 key = hci_find_link_key(hdev, bdaddr);
1204 if (!key)
1205 return -ENOENT;
1206
1207 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1208
1209 list_del(&key->list);
1210 kfree(key);
1211
1212 return 0;
1213}
1214
6bd32326
VT
1215/* HCI command timer function */
1216static void hci_cmd_timer(unsigned long arg)
1217{
1218 struct hci_dev *hdev = (void *) arg;
1219
1220 BT_ERR("%s command tx timeout", hdev->name);
1221 atomic_set(&hdev->cmd_cnt, 1);
1222 tasklet_schedule(&hdev->cmd_task);
1223}
1224
2763eda6
SJ
1225struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1226 bdaddr_t *bdaddr)
1227{
1228 struct oob_data *data;
1229
1230 list_for_each_entry(data, &hdev->remote_oob_data, list)
1231 if (bacmp(bdaddr, &data->bdaddr) == 0)
1232 return data;
1233
1234 return NULL;
1235}
1236
1237int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1238{
1239 struct oob_data *data;
1240
1241 data = hci_find_remote_oob_data(hdev, bdaddr);
1242 if (!data)
1243 return -ENOENT;
1244
1245 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1246
1247 list_del(&data->list);
1248 kfree(data);
1249
1250 return 0;
1251}
1252
1253int hci_remote_oob_data_clear(struct hci_dev *hdev)
1254{
1255 struct oob_data *data, *n;
1256
1257 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1258 list_del(&data->list);
1259 kfree(data);
1260 }
1261
1262 return 0;
1263}
1264
1265int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1266 u8 *randomizer)
1267{
1268 struct oob_data *data;
1269
1270 data = hci_find_remote_oob_data(hdev, bdaddr);
1271
1272 if (!data) {
1273 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1274 if (!data)
1275 return -ENOMEM;
1276
1277 bacpy(&data->bdaddr, bdaddr);
1278 list_add(&data->list, &hdev->remote_oob_data);
1279 }
1280
1281 memcpy(data->hash, hash, sizeof(data->hash));
1282 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1283
1284 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1285
1286 return 0;
1287}
1288
b2a66aad
AJ
1289struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1290 bdaddr_t *bdaddr)
1291{
8035ded4 1292 struct bdaddr_list *b;
b2a66aad 1293
8035ded4 1294 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1295 if (bacmp(bdaddr, &b->bdaddr) == 0)
1296 return b;
b2a66aad
AJ
1297
1298 return NULL;
1299}
1300
1301int hci_blacklist_clear(struct hci_dev *hdev)
1302{
1303 struct list_head *p, *n;
1304
1305 list_for_each_safe(p, n, &hdev->blacklist) {
1306 struct bdaddr_list *b;
1307
1308 b = list_entry(p, struct bdaddr_list, list);
1309
1310 list_del(p);
1311 kfree(b);
1312 }
1313
1314 return 0;
1315}
1316
1317int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1318{
1319 struct bdaddr_list *entry;
b2a66aad
AJ
1320
1321 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1322 return -EBADF;
1323
5e762444
AJ
1324 if (hci_blacklist_lookup(hdev, bdaddr))
1325 return -EEXIST;
b2a66aad
AJ
1326
1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1328 if (!entry)
1329 return -ENOMEM;
b2a66aad
AJ
1330
1331 bacpy(&entry->bdaddr, bdaddr);
1332
1333 list_add(&entry->list, &hdev->blacklist);
1334
744cf19e 1335 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1336}
1337
1338int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1339{
1340 struct bdaddr_list *entry;
b2a66aad 1341
1ec918ce 1342 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1343 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1344
1345 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1346 if (!entry)
5e762444 1347 return -ENOENT;
b2a66aad
AJ
1348
1349 list_del(&entry->list);
1350 kfree(entry);
1351
744cf19e 1352 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1353}
1354
35815085
AG
/* Timer callback: flush the advertising-report cache under the dev lock */
static void hci_clear_adv_cache(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        hci_dev_lock(hdev);
        hci_adv_entries_clear(hdev);
        hci_dev_unlock(hdev);
}
1365
76c8686f
AG
1366int hci_adv_entries_clear(struct hci_dev *hdev)
1367{
1368 struct adv_entry *entry, *tmp;
1369
1370 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1371 list_del(&entry->list);
1372 kfree(entry);
1373 }
1374
1375 BT_DBG("%s adv cache cleared", hdev->name);
1376
1377 return 0;
1378}
1379
1380struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct adv_entry *entry;
1383
1384 list_for_each_entry(entry, &hdev->adv_entries, list)
1385 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1386 return entry;
1387
1388 return NULL;
1389}
1390
1391static inline int is_connectable_adv(u8 evt_type)
1392{
1393 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1394 return 1;
1395
1396 return 0;
1397}
1398
1399int hci_add_adv_entry(struct hci_dev *hdev,
1400 struct hci_ev_le_advertising_info *ev)
1401{
1402 struct adv_entry *entry;
1403
1404 if (!is_connectable_adv(ev->evt_type))
1405 return -EINVAL;
1406
1407 /* Only new entries should be added to adv_entries. So, if
1408 * bdaddr was found, don't add it. */
1409 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1410 return 0;
1411
1412 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1413 if (!entry)
1414 return -ENOMEM;
1415
1416 bacpy(&entry->bdaddr, &ev->bdaddr);
1417 entry->bdaddr_type = ev->bdaddr_type;
1418
1419 list_add(&entry->list, &hdev->adv_entries);
1420
1421 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1422 batostr(&entry->bdaddr), entry->bdaddr_type);
1423
1424 return 0;
1425}
1426
1da177e4
LT
1427/* Register HCI device */
1428int hci_register_dev(struct hci_dev *hdev)
1429{
1430 struct list_head *head = &hci_dev_list, *p;
08add513 1431 int i, id, error;
1da177e4 1432
c13854ce
MH
1433 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1434 hdev->bus, hdev->owner);
1da177e4
LT
1435
1436 if (!hdev->open || !hdev->close || !hdev->destruct)
1437 return -EINVAL;
1438
08add513
MM
1439 /* Do not allow HCI_AMP devices to register at index 0,
1440 * so the index can be used as the AMP controller ID.
1441 */
1442 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1443
1da177e4
LT
1444 write_lock_bh(&hci_dev_list_lock);
1445
1446 /* Find first available device id */
1447 list_for_each(p, &hci_dev_list) {
1448 if (list_entry(p, struct hci_dev, list)->id != id)
1449 break;
1450 head = p; id++;
1451 }
8e87d142 1452
1da177e4
LT
1453 sprintf(hdev->name, "hci%d", id);
1454 hdev->id = id;
c6feeb28 1455 list_add_tail(&hdev->list, head);
1da177e4
LT
1456
1457 atomic_set(&hdev->refcnt, 1);
1458 spin_lock_init(&hdev->lock);
1459
1460 hdev->flags = 0;
1461 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1462 hdev->esco_type = (ESCO_HV1);
1da177e4 1463 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1464 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1465
04837f64
MH
1466 hdev->idle_timeout = 0;
1467 hdev->sniff_max_interval = 800;
1468 hdev->sniff_min_interval = 80;
1469
70f23020 1470 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1da177e4
LT
1471 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1472 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1473
1474 skb_queue_head_init(&hdev->rx_q);
1475 skb_queue_head_init(&hdev->cmd_q);
1476 skb_queue_head_init(&hdev->raw_q);
1477
6bd32326
VT
1478 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1479
cd4c5391 1480 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1481 hdev->reassembly[i] = NULL;
1482
1da177e4 1483 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1484 mutex_init(&hdev->req_lock);
1da177e4
LT
1485
1486 inquiry_cache_init(hdev);
1487
1488 hci_conn_hash_init(hdev);
1489
2e58ef3e
JH
1490 INIT_LIST_HEAD(&hdev->mgmt_pending);
1491
ea4bd8ba 1492 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1493
2aeb9a1a
JH
1494 INIT_LIST_HEAD(&hdev->uuids);
1495
55ed8ca1
JH
1496 INIT_LIST_HEAD(&hdev->link_keys);
1497
2763eda6
SJ
1498 INIT_LIST_HEAD(&hdev->remote_oob_data);
1499
76c8686f 1500 INIT_LIST_HEAD(&hdev->adv_entries);
35815085
AG
1501 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1502 (unsigned long) hdev);
76c8686f 1503
ab81cbf9 1504 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1505 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1506
16ab91ab
JH
1507 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1508
1da177e4
LT
1509 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1510
1511 atomic_set(&hdev->promisc, 0);
1512
1513 write_unlock_bh(&hci_dev_list_lock);
1514
f48fd9c8 1515 hdev->workqueue = create_singlethread_workqueue(hdev->name);
33ca954d
DH
1516 if (!hdev->workqueue) {
1517 error = -ENOMEM;
1518 goto err;
1519 }
f48fd9c8 1520
33ca954d
DH
1521 error = hci_add_sysfs(hdev);
1522 if (error < 0)
1523 goto err_wqueue;
1da177e4 1524
611b30f7
MH
1525 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1526 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1527 if (hdev->rfkill) {
1528 if (rfkill_register(hdev->rfkill) < 0) {
1529 rfkill_destroy(hdev->rfkill);
1530 hdev->rfkill = NULL;
1531 }
1532 }
1533
ab81cbf9
JH
1534 set_bit(HCI_AUTO_OFF, &hdev->flags);
1535 set_bit(HCI_SETUP, &hdev->flags);
1536 queue_work(hdev->workqueue, &hdev->power_on);
1537
1da177e4
LT
1538 hci_notify(hdev, HCI_DEV_REG);
1539
1540 return id;
f48fd9c8 1541
33ca954d
DH
1542err_wqueue:
1543 destroy_workqueue(hdev->workqueue);
1544err:
f48fd9c8
MH
1545 write_lock_bh(&hci_dev_list_lock);
1546 list_del(&hdev->list);
1547 write_unlock_bh(&hci_dev_list_lock);
1548
33ca954d 1549 return error;
1da177e4
LT
1550}
1551EXPORT_SYMBOL(hci_register_dev);
1552
1553/* Unregister HCI device */
59735631 1554void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1555{
ef222013
MH
1556 int i;
1557
c13854ce 1558 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1559
1da177e4
LT
1560 write_lock_bh(&hci_dev_list_lock);
1561 list_del(&hdev->list);
1562 write_unlock_bh(&hci_dev_list_lock);
1563
1564 hci_dev_do_close(hdev);
1565
cd4c5391 1566 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1567 kfree_skb(hdev->reassembly[i]);
1568
ab81cbf9 1569 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86
JH
1570 !test_bit(HCI_SETUP, &hdev->flags)) {
1571 hci_dev_lock_bh(hdev);
744cf19e 1572 mgmt_index_removed(hdev);
56e5cb86
JH
1573 hci_dev_unlock_bh(hdev);
1574 }
ab81cbf9 1575
2e58ef3e
JH
1576 /* mgmt_index_removed should take care of emptying the
1577 * pending list */
1578 BUG_ON(!list_empty(&hdev->mgmt_pending));
1579
1da177e4
LT
1580 hci_notify(hdev, HCI_DEV_UNREG);
1581
611b30f7
MH
1582 if (hdev->rfkill) {
1583 rfkill_unregister(hdev->rfkill);
1584 rfkill_destroy(hdev->rfkill);
1585 }
1586
ce242970 1587 hci_del_sysfs(hdev);
147e2d59 1588
35815085 1589 del_timer(&hdev->adv_timer);
c6f3c5f7 1590
f48fd9c8
MH
1591 destroy_workqueue(hdev->workqueue);
1592
e2e0cacb
JH
1593 hci_dev_lock_bh(hdev);
1594 hci_blacklist_clear(hdev);
2aeb9a1a 1595 hci_uuids_clear(hdev);
55ed8ca1 1596 hci_link_keys_clear(hdev);
2763eda6 1597 hci_remote_oob_data_clear(hdev);
76c8686f 1598 hci_adv_entries_clear(hdev);
e2e0cacb
JH
1599 hci_dev_unlock_bh(hdev);
1600
1da177e4 1601 __hci_dev_put(hdev);
1da177e4
LT
1602}
1603EXPORT_SYMBOL(hci_unregister_dev);
1604
1605/* Suspend HCI device */
1606int hci_suspend_dev(struct hci_dev *hdev)
1607{
1608 hci_notify(hdev, HCI_DEV_SUSPEND);
1609 return 0;
1610}
1611EXPORT_SYMBOL(hci_suspend_dev);
1612
1613/* Resume HCI device */
1614int hci_resume_dev(struct hci_dev *hdev)
1615{
1616 hci_notify(hdev, HCI_DEV_RESUME);
1617 return 0;
1618}
1619EXPORT_SYMBOL(hci_resume_dev);
1620
76bca880
MH
1621/* Receive frame from HCI drivers */
1622int hci_recv_frame(struct sk_buff *skb)
1623{
1624 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1625 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1626 && !test_bit(HCI_INIT, &hdev->flags))) {
1627 kfree_skb(skb);
1628 return -ENXIO;
1629 }
1630
1631 /* Incomming skb */
1632 bt_cb(skb)->incoming = 1;
1633
1634 /* Time stamp */
1635 __net_timestamp(skb);
1636
1637 /* Queue frame for rx task */
1638 skb_queue_tail(&hdev->rx_q, skb);
c78ae283
MH
1639 tasklet_schedule(&hdev->rx_task);
1640
76bca880
MH
1641 return 0;
1642}
1643EXPORT_SYMBOL(hci_recv_frame);
1644
33e882a5 1645static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1646 int count, __u8 index)
33e882a5
SS
1647{
1648 int len = 0;
1649 int hlen = 0;
1650 int remain = count;
1651 struct sk_buff *skb;
1652 struct bt_skb_cb *scb;
1653
1654 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1655 index >= NUM_REASSEMBLY)
1656 return -EILSEQ;
1657
1658 skb = hdev->reassembly[index];
1659
1660 if (!skb) {
1661 switch (type) {
1662 case HCI_ACLDATA_PKT:
1663 len = HCI_MAX_FRAME_SIZE;
1664 hlen = HCI_ACL_HDR_SIZE;
1665 break;
1666 case HCI_EVENT_PKT:
1667 len = HCI_MAX_EVENT_SIZE;
1668 hlen = HCI_EVENT_HDR_SIZE;
1669 break;
1670 case HCI_SCODATA_PKT:
1671 len = HCI_MAX_SCO_SIZE;
1672 hlen = HCI_SCO_HDR_SIZE;
1673 break;
1674 }
1675
1e429f38 1676 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1677 if (!skb)
1678 return -ENOMEM;
1679
1680 scb = (void *) skb->cb;
1681 scb->expect = hlen;
1682 scb->pkt_type = type;
1683
1684 skb->dev = (void *) hdev;
1685 hdev->reassembly[index] = skb;
1686 }
1687
1688 while (count) {
1689 scb = (void *) skb->cb;
1690 len = min(scb->expect, (__u16)count);
1691
1692 memcpy(skb_put(skb, len), data, len);
1693
1694 count -= len;
1695 data += len;
1696 scb->expect -= len;
1697 remain = count;
1698
1699 switch (type) {
1700 case HCI_EVENT_PKT:
1701 if (skb->len == HCI_EVENT_HDR_SIZE) {
1702 struct hci_event_hdr *h = hci_event_hdr(skb);
1703 scb->expect = h->plen;
1704
1705 if (skb_tailroom(skb) < scb->expect) {
1706 kfree_skb(skb);
1707 hdev->reassembly[index] = NULL;
1708 return -ENOMEM;
1709 }
1710 }
1711 break;
1712
1713 case HCI_ACLDATA_PKT:
1714 if (skb->len == HCI_ACL_HDR_SIZE) {
1715 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1716 scb->expect = __le16_to_cpu(h->dlen);
1717
1718 if (skb_tailroom(skb) < scb->expect) {
1719 kfree_skb(skb);
1720 hdev->reassembly[index] = NULL;
1721 return -ENOMEM;
1722 }
1723 }
1724 break;
1725
1726 case HCI_SCODATA_PKT:
1727 if (skb->len == HCI_SCO_HDR_SIZE) {
1728 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1729 scb->expect = h->dlen;
1730
1731 if (skb_tailroom(skb) < scb->expect) {
1732 kfree_skb(skb);
1733 hdev->reassembly[index] = NULL;
1734 return -ENOMEM;
1735 }
1736 }
1737 break;
1738 }
1739
1740 if (scb->expect == 0) {
1741 /* Complete frame */
1742
1743 bt_cb(skb)->pkt_type = type;
1744 hci_recv_frame(skb);
1745
1746 hdev->reassembly[index] = NULL;
1747 return remain;
1748 }
1749 }
1750
1751 return remain;
1752}
1753
ef222013
MH
1754int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1755{
f39a3c06
SS
1756 int rem = 0;
1757
ef222013
MH
1758 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1759 return -EILSEQ;
1760
da5f6c37 1761 while (count) {
1e429f38 1762 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1763 if (rem < 0)
1764 return rem;
ef222013 1765
f39a3c06
SS
1766 data += (count - rem);
1767 count = rem;
f81c6224 1768 }
ef222013 1769
f39a3c06 1770 return rem;
ef222013
MH
1771}
1772EXPORT_SYMBOL(hci_recv_fragment);
1773
99811510
SS
1774#define STREAM_REASSEMBLY 0
1775
1776int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1777{
1778 int type;
1779 int rem = 0;
1780
da5f6c37 1781 while (count) {
99811510
SS
1782 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1783
1784 if (!skb) {
1785 struct { char type; } *pkt;
1786
1787 /* Start of the frame */
1788 pkt = data;
1789 type = pkt->type;
1790
1791 data++;
1792 count--;
1793 } else
1794 type = bt_cb(skb)->pkt_type;
1795
1e429f38
GP
1796 rem = hci_reassembly(hdev, type, data, count,
1797 STREAM_REASSEMBLY);
99811510
SS
1798 if (rem < 0)
1799 return rem;
1800
1801 data += (count - rem);
1802 count = rem;
f81c6224 1803 }
99811510
SS
1804
1805 return rem;
1806}
1807EXPORT_SYMBOL(hci_recv_stream_fragment);
1808
1da177e4
LT
1809/* ---- Interface to upper protocols ---- */
1810
1811/* Register/Unregister protocols.
1812 * hci_task_lock is used to ensure that no tasks are running. */
1813int hci_register_proto(struct hci_proto *hp)
1814{
1815 int err = 0;
1816
1817 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1818
1819 if (hp->id >= HCI_MAX_PROTO)
1820 return -EINVAL;
1821
1822 write_lock_bh(&hci_task_lock);
1823
1824 if (!hci_proto[hp->id])
1825 hci_proto[hp->id] = hp;
1826 else
1827 err = -EEXIST;
1828
1829 write_unlock_bh(&hci_task_lock);
1830
1831 return err;
1832}
1833EXPORT_SYMBOL(hci_register_proto);
1834
1835int hci_unregister_proto(struct hci_proto *hp)
1836{
1837 int err = 0;
1838
1839 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1840
1841 if (hp->id >= HCI_MAX_PROTO)
1842 return -EINVAL;
1843
1844 write_lock_bh(&hci_task_lock);
1845
1846 if (hci_proto[hp->id])
1847 hci_proto[hp->id] = NULL;
1848 else
1849 err = -ENOENT;
1850
1851 write_unlock_bh(&hci_task_lock);
1852
1853 return err;
1854}
1855EXPORT_SYMBOL(hci_unregister_proto);
1856
1857int hci_register_cb(struct hci_cb *cb)
1858{
1859 BT_DBG("%p name %s", cb, cb->name);
1860
1861 write_lock_bh(&hci_cb_list_lock);
1862 list_add(&cb->list, &hci_cb_list);
1863 write_unlock_bh(&hci_cb_list_lock);
1864
1865 return 0;
1866}
1867EXPORT_SYMBOL(hci_register_cb);
1868
1869int hci_unregister_cb(struct hci_cb *cb)
1870{
1871 BT_DBG("%p name %s", cb, cb->name);
1872
1873 write_lock_bh(&hci_cb_list_lock);
1874 list_del(&cb->list);
1875 write_unlock_bh(&hci_cb_list_lock);
1876
1877 return 0;
1878}
1879EXPORT_SYMBOL(hci_unregister_cb);
1880
1881static int hci_send_frame(struct sk_buff *skb)
1882{
1883 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1884
1885 if (!hdev) {
1886 kfree_skb(skb);
1887 return -ENODEV;
1888 }
1889
0d48d939 1890 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1891
1892 if (atomic_read(&hdev->promisc)) {
1893 /* Time stamp */
a61bbcf2 1894 __net_timestamp(skb);
1da177e4 1895
eec8d2bc 1896 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1897 }
1898
1899 /* Get rid of skb owner, prior to sending to the driver. */
1900 skb_orphan(skb);
1901
1902 return hdev->send(skb);
1903}
1904
1905/* Send HCI command */
a9de9248 1906int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1907{
1908 int len = HCI_COMMAND_HDR_SIZE + plen;
1909 struct hci_command_hdr *hdr;
1910 struct sk_buff *skb;
1911
a9de9248 1912 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1913
1914 skb = bt_skb_alloc(len, GFP_ATOMIC);
1915 if (!skb) {
ef222013 1916 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1917 return -ENOMEM;
1918 }
1919
1920 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1921 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1922 hdr->plen = plen;
1923
1924 if (plen)
1925 memcpy(skb_put(skb, plen), param, plen);
1926
1927 BT_DBG("skb len %d", skb->len);
1928
0d48d939 1929 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1930 skb->dev = (void *) hdev;
c78ae283 1931
a5040efa
JH
1932 if (test_bit(HCI_INIT, &hdev->flags))
1933 hdev->init_last_cmd = opcode;
1934
1da177e4 1935 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 1936 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
1937
1938 return 0;
1939}
1da177e4
LT
1940
1941/* Get data from the previously sent command */
a9de9248 1942void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1943{
1944 struct hci_command_hdr *hdr;
1945
1946 if (!hdev->sent_cmd)
1947 return NULL;
1948
1949 hdr = (void *) hdev->sent_cmd->data;
1950
a9de9248 1951 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1952 return NULL;
1953
a9de9248 1954 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1955
1956 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1957}
1958
1959/* Send ACL data */
1960static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1961{
1962 struct hci_acl_hdr *hdr;
1963 int len = skb->len;
1964
badff6d0
ACM
1965 skb_push(skb, HCI_ACL_HDR_SIZE);
1966 skb_reset_transport_header(skb);
9c70220b 1967 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1968 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1969 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1970}
1971
73d80deb
LAD
1972static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1973 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1974{
1975 struct hci_dev *hdev = conn->hdev;
1976 struct sk_buff *list;
1977
70f23020
AE
1978 list = skb_shinfo(skb)->frag_list;
1979 if (!list) {
1da177e4
LT
1980 /* Non fragmented */
1981 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1982
73d80deb 1983 skb_queue_tail(queue, skb);
1da177e4
LT
1984 } else {
1985 /* Fragmented */
1986 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1987
1988 skb_shinfo(skb)->frag_list = NULL;
1989
1990 /* Queue all fragments atomically */
73d80deb 1991 spin_lock_bh(&queue->lock);
1da177e4 1992
73d80deb 1993 __skb_queue_tail(queue, skb);
e702112f
AE
1994
1995 flags &= ~ACL_START;
1996 flags |= ACL_CONT;
1da177e4
LT
1997 do {
1998 skb = list; list = list->next;
8e87d142 1999
1da177e4 2000 skb->dev = (void *) hdev;
0d48d939 2001 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2002 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2003
2004 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2005
73d80deb 2006 __skb_queue_tail(queue, skb);
1da177e4
LT
2007 } while (list);
2008
73d80deb 2009 spin_unlock_bh(&queue->lock);
1da177e4 2010 }
73d80deb
LAD
2011}
2012
2013void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2014{
2015 struct hci_conn *conn = chan->conn;
2016 struct hci_dev *hdev = conn->hdev;
2017
2018 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2019
2020 skb->dev = (void *) hdev;
2021 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2022 hci_add_acl_hdr(skb, conn->handle, flags);
2023
2024 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2025
c78ae283 2026 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2027}
2028EXPORT_SYMBOL(hci_send_acl);
2029
2030/* Send SCO data */
0d861d8b 2031void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2032{
2033 struct hci_dev *hdev = conn->hdev;
2034 struct hci_sco_hdr hdr;
2035
2036 BT_DBG("%s len %d", hdev->name, skb->len);
2037
aca3192c 2038 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2039 hdr.dlen = skb->len;
2040
badff6d0
ACM
2041 skb_push(skb, HCI_SCO_HDR_SIZE);
2042 skb_reset_transport_header(skb);
9c70220b 2043 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2044
2045 skb->dev = (void *) hdev;
0d48d939 2046 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2047
1da177e4 2048 skb_queue_tail(&conn->data_q, skb);
c78ae283 2049 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2050}
2051EXPORT_SYMBOL(hci_send_sco);
2052
2053/* ---- HCI TX task (outgoing data) ---- */
2054
2055/* HCI Connection scheduler */
2056static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2057{
2058 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2059 struct hci_conn *conn = NULL, *c;
1da177e4 2060 int num = 0, min = ~0;
1da177e4 2061
8e87d142 2062 /* We don't have to lock device here. Connections are always
1da177e4 2063 * added and removed with TX task disabled. */
8035ded4 2064 list_for_each_entry(c, &h->list, list) {
769be974 2065 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2066 continue;
769be974
MH
2067
2068 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2069 continue;
2070
1da177e4
LT
2071 num++;
2072
2073 if (c->sent < min) {
2074 min = c->sent;
2075 conn = c;
2076 }
52087a79
LAD
2077
2078 if (hci_conn_num(hdev, type) == num)
2079 break;
1da177e4
LT
2080 }
2081
2082 if (conn) {
6ed58ec5
VT
2083 int cnt, q;
2084
2085 switch (conn->type) {
2086 case ACL_LINK:
2087 cnt = hdev->acl_cnt;
2088 break;
2089 case SCO_LINK:
2090 case ESCO_LINK:
2091 cnt = hdev->sco_cnt;
2092 break;
2093 case LE_LINK:
2094 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2095 break;
2096 default:
2097 cnt = 0;
2098 BT_ERR("Unknown link type");
2099 }
2100
2101 q = cnt / num;
1da177e4
LT
2102 *quote = q ? q : 1;
2103 } else
2104 *quote = 0;
2105
2106 BT_DBG("conn %p quote %d", conn, *quote);
2107 return conn;
2108}
2109
bae1f5d9 2110static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2111{
2112 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2113 struct hci_conn *c;
1da177e4 2114
bae1f5d9 2115 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
2116
2117 /* Kill stalled connections */
8035ded4 2118 list_for_each_entry(c, &h->list, list) {
bae1f5d9
VT
2119 if (c->type == type && c->sent) {
2120 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2121 hdev->name, batostr(&c->dst));
2122 hci_acl_disconn(c, 0x13);
2123 }
2124 }
2125}
2126
73d80deb
LAD
2127static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2128 int *quote)
1da177e4 2129{
73d80deb
LAD
2130 struct hci_conn_hash *h = &hdev->conn_hash;
2131 struct hci_chan *chan = NULL;
2132 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2133 struct hci_conn *conn;
73d80deb
LAD
2134 int cnt, q, conn_num = 0;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 list_for_each_entry(conn, &h->list, list) {
2139 struct hci_chan_hash *ch;
2140 struct hci_chan *tmp;
2141
2142 if (conn->type != type)
2143 continue;
2144
2145 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2146 continue;
2147
2148 conn_num++;
2149
2150 ch = &conn->chan_hash;
2151
2152 list_for_each_entry(tmp, &ch->list, list) {
2153 struct sk_buff *skb;
2154
2155 if (skb_queue_empty(&tmp->data_q))
2156 continue;
2157
2158 skb = skb_peek(&tmp->data_q);
2159 if (skb->priority < cur_prio)
2160 continue;
2161
2162 if (skb->priority > cur_prio) {
2163 num = 0;
2164 min = ~0;
2165 cur_prio = skb->priority;
2166 }
2167
2168 num++;
2169
2170 if (conn->sent < min) {
2171 min = conn->sent;
2172 chan = tmp;
2173 }
2174 }
2175
2176 if (hci_conn_num(hdev, type) == conn_num)
2177 break;
2178 }
2179
2180 if (!chan)
2181 return NULL;
2182
2183 switch (chan->conn->type) {
2184 case ACL_LINK:
2185 cnt = hdev->acl_cnt;
2186 break;
2187 case SCO_LINK:
2188 case ESCO_LINK:
2189 cnt = hdev->sco_cnt;
2190 break;
2191 case LE_LINK:
2192 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2193 break;
2194 default:
2195 cnt = 0;
2196 BT_ERR("Unknown link type");
2197 }
2198
2199 q = cnt / num;
2200 *quote = q ? q : 1;
2201 BT_DBG("chan %p quote %d", chan, *quote);
2202 return chan;
2203}
2204
02b20f0b
LAD
2205static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2206{
2207 struct hci_conn_hash *h = &hdev->conn_hash;
2208 struct hci_conn *conn;
2209 int num = 0;
2210
2211 BT_DBG("%s", hdev->name);
2212
2213 list_for_each_entry(conn, &h->list, list) {
2214 struct hci_chan_hash *ch;
2215 struct hci_chan *chan;
2216
2217 if (conn->type != type)
2218 continue;
2219
2220 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221 continue;
2222
2223 num++;
2224
2225 ch = &conn->chan_hash;
2226 list_for_each_entry(chan, &ch->list, list) {
2227 struct sk_buff *skb;
2228
2229 if (chan->sent) {
2230 chan->sent = 0;
2231 continue;
2232 }
2233
2234 if (skb_queue_empty(&chan->data_q))
2235 continue;
2236
2237 skb = skb_peek(&chan->data_q);
2238 if (skb->priority >= HCI_PRIO_MAX - 1)
2239 continue;
2240
2241 skb->priority = HCI_PRIO_MAX - 1;
2242
2243 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2244 skb->priority);
2245 }
2246
2247 if (hci_conn_num(hdev, type) == num)
2248 break;
2249 }
2250}
2251
73d80deb
LAD
2252static inline void hci_sched_acl(struct hci_dev *hdev)
2253{
2254 struct hci_chan *chan;
1da177e4
LT
2255 struct sk_buff *skb;
2256 int quote;
73d80deb 2257 unsigned int cnt;
1da177e4
LT
2258
2259 BT_DBG("%s", hdev->name);
2260
52087a79
LAD
2261 if (!hci_conn_num(hdev, ACL_LINK))
2262 return;
2263
1da177e4
LT
2264 if (!test_bit(HCI_RAW, &hdev->flags)) {
2265 /* ACL tx timeout must be longer than maximum
2266 * link supervision timeout (40.9 seconds) */
82453021 2267 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2268 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2269 }
2270
73d80deb 2271 cnt = hdev->acl_cnt;
04837f64 2272
73d80deb
LAD
2273 while (hdev->acl_cnt &&
2274 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2275 u32 priority = (skb_peek(&chan->data_q))->priority;
2276 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2277 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278 skb->len, skb->priority);
2279
ec1cce24
LAD
2280 /* Stop if priority has changed */
2281 if (skb->priority < priority)
2282 break;
2283
2284 skb = skb_dequeue(&chan->data_q);
2285
73d80deb
LAD
2286 hci_conn_enter_active_mode(chan->conn,
2287 bt_cb(skb)->force_active);
04837f64 2288
1da177e4
LT
2289 hci_send_frame(skb);
2290 hdev->acl_last_tx = jiffies;
2291
2292 hdev->acl_cnt--;
73d80deb
LAD
2293 chan->sent++;
2294 chan->conn->sent++;
1da177e4
LT
2295 }
2296 }
02b20f0b
LAD
2297
2298 if (cnt != hdev->acl_cnt)
2299 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2300}
2301
2302/* Schedule SCO */
2303static inline void hci_sched_sco(struct hci_dev *hdev)
2304{
2305 struct hci_conn *conn;
2306 struct sk_buff *skb;
2307 int quote;
2308
2309 BT_DBG("%s", hdev->name);
2310
52087a79
LAD
2311 if (!hci_conn_num(hdev, SCO_LINK))
2312 return;
2313
1da177e4
LT
2314 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2315 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2316 BT_DBG("skb %p len %d", skb, skb->len);
2317 hci_send_frame(skb);
2318
2319 conn->sent++;
2320 if (conn->sent == ~0)
2321 conn->sent = 0;
2322 }
2323 }
2324}
2325
b6a0dc82
MH
2326static inline void hci_sched_esco(struct hci_dev *hdev)
2327{
2328 struct hci_conn *conn;
2329 struct sk_buff *skb;
2330 int quote;
2331
2332 BT_DBG("%s", hdev->name);
2333
52087a79
LAD
2334 if (!hci_conn_num(hdev, ESCO_LINK))
2335 return;
2336
b6a0dc82
MH
2337 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339 BT_DBG("skb %p len %d", skb, skb->len);
2340 hci_send_frame(skb);
2341
2342 conn->sent++;
2343 if (conn->sent == ~0)
2344 conn->sent = 0;
2345 }
2346 }
2347}
2348
6ed58ec5
VT
/* Schedule LE: push queued LE frames to the driver.
 *
 * LE traffic either has its own buffer pool (le_pkts/le_cnt) or, when
 * the controller reports no dedicated LE buffers, borrows credits from
 * the ACL pool (acl_cnt). Whichever pool was drawn from is written
 * back at the end.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	/* Nothing to do without at least one LE connection */
	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers if present,
	 * otherwise share the ACL buffers. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* NOTE(review): assumes hci_chan_sent() only returns
		 * channels with a non-empty data_q — skb_peek() is not
		 * NULL-checked here. Verify against hci_chan_sent(). */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked skb is still at the head; now consume it */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2399
1da177e4
LT
/* TX tasklet: drain every transmit queue towards the HCI driver.
 *
 * Runs under a read lock on hci_task_lock, so it can proceed
 * concurrently with the RX task but is excluded by writers.
 * The scheduler order (ACL, SCO, eSCO, LE, then raw) is deliberate.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2426
25985edc 2427/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2428
2429/* ACL data packet */
2430static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2431{
2432 struct hci_acl_hdr *hdr = (void *) skb->data;
2433 struct hci_conn *conn;
2434 __u16 handle, flags;
2435
2436 skb_pull(skb, HCI_ACL_HDR_SIZE);
2437
2438 handle = __le16_to_cpu(hdr->handle);
2439 flags = hci_flags(handle);
2440 handle = hci_handle(handle);
2441
2442 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2443
2444 hdev->stat.acl_rx++;
2445
2446 hci_dev_lock(hdev);
2447 conn = hci_conn_hash_lookup_handle(hdev, handle);
2448 hci_dev_unlock(hdev);
8e87d142 2449
1da177e4
LT
2450 if (conn) {
2451 register struct hci_proto *hp;
2452
14b12d0b 2453 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
04837f64 2454
1da177e4 2455 /* Send to upper protocol */
70f23020
AE
2456 hp = hci_proto[HCI_PROTO_L2CAP];
2457 if (hp && hp->recv_acldata) {
1da177e4
LT
2458 hp->recv_acldata(conn, skb, flags);
2459 return;
2460 }
2461 } else {
8e87d142 2462 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2463 hdev->name, handle);
2464 }
2465
2466 kfree_skb(skb);
2467}
2468
2469/* SCO data packet */
2470static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2471{
2472 struct hci_sco_hdr *hdr = (void *) skb->data;
2473 struct hci_conn *conn;
2474 __u16 handle;
2475
2476 skb_pull(skb, HCI_SCO_HDR_SIZE);
2477
2478 handle = __le16_to_cpu(hdr->handle);
2479
2480 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2481
2482 hdev->stat.sco_rx++;
2483
2484 hci_dev_lock(hdev);
2485 conn = hci_conn_hash_lookup_handle(hdev, handle);
2486 hci_dev_unlock(hdev);
2487
2488 if (conn) {
2489 register struct hci_proto *hp;
2490
2491 /* Send to upper protocol */
70f23020
AE
2492 hp = hci_proto[HCI_PROTO_SCO];
2493 if (hp && hp->recv_scodata) {
1da177e4
LT
2494 hp->recv_scodata(conn, skb);
2495 return;
2496 }
2497 } else {
8e87d142 2498 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2499 hdev->name, handle);
2500 }
2501
2502 kfree_skb(skb);
2503}
2504
6516455d 2505static void hci_rx_task(unsigned long arg)
1da177e4
LT
2506{
2507 struct hci_dev *hdev = (struct hci_dev *) arg;
2508 struct sk_buff *skb;
2509
2510 BT_DBG("%s", hdev->name);
2511
2512 read_lock(&hci_task_lock);
2513
2514 while ((skb = skb_dequeue(&hdev->rx_q))) {
2515 if (atomic_read(&hdev->promisc)) {
2516 /* Send copy to the sockets */
eec8d2bc 2517 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2518 }
2519
2520 if (test_bit(HCI_RAW, &hdev->flags)) {
2521 kfree_skb(skb);
2522 continue;
2523 }
2524
2525 if (test_bit(HCI_INIT, &hdev->flags)) {
2526 /* Don't process data packets in this states. */
0d48d939 2527 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2528 case HCI_ACLDATA_PKT:
2529 case HCI_SCODATA_PKT:
2530 kfree_skb(skb);
2531 continue;
3ff50b79 2532 }
1da177e4
LT
2533 }
2534
2535 /* Process frame */
0d48d939 2536 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2537 case HCI_EVENT_PKT:
2538 hci_event_packet(hdev, skb);
2539 break;
2540
2541 case HCI_ACLDATA_PKT:
2542 BT_DBG("%s ACL data packet", hdev->name);
2543 hci_acldata_packet(hdev, skb);
2544 break;
2545
2546 case HCI_SCODATA_PKT:
2547 BT_DBG("%s SCO data packet", hdev->name);
2548 hci_scodata_packet(hdev, skb);
2549 break;
2550
2551 default:
2552 kfree_skb(skb);
2553 break;
2554 }
2555 }
2556
2557 read_unlock(&hci_task_lock);
2558}
2559
/* CMD tasklet: send the next queued HCI command when the controller
 * has a command credit (cmd_cnt) available.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command (kfree_skb(NULL) is
		 * a no-op on the first command). */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the outgoing command — presumably
		 * matched against the completion event by code outside
		 * this chunk; verify against the event handlers. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending;
			 * otherwise (re)arm the watchdog. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): requeue the
			 * command and retry from the tasklet later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2519a1fc
AG
2590
2591int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2592{
2593 /* General inquiry access code (GIAC) */
2594 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2595 struct hci_cp_inquiry cp;
2596
2597 BT_DBG("%s", hdev->name);
2598
2599 if (test_bit(HCI_INQUIRY, &hdev->flags))
2600 return -EINPROGRESS;
2601
2602 memset(&cp, 0, sizeof(cp));
2603 memcpy(&cp.lap, lap, sizeof(cp.lap));
2604 cp.length = length;
2605
2606 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2607}
023d5049
AG
2608
2609int hci_cancel_inquiry(struct hci_dev *hdev)
2610{
2611 BT_DBG("%s", hdev->name);
2612
2613 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2614 return -EPERM;
2615
2616 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2617}
7784d78f
AE
2618
2619module_param(enable_hs, bool, 0644);
2620MODULE_PARM_DESC(enable_hs, "Enable High Speed");