Bluetooth: Add MGMT event for Passkey Entry
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
1da177e4
LT
/* Forward declarations for the RX/TX/command tasklet handlers
 * (bodies appear later in this file). */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
e041c683 82 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
83}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
e041c683 87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
88}
89
6516455d 90static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 91{
e041c683 92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
93}
94
95/* ---- HCI requests ---- */
96
23bb5763 97void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 98{
23bb5763
JH
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
a5040efa
JH
101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 105 return;
1da177e4
LT
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
8e87d142 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 127 unsigned long opt, __u32 timeout)
1da177e4
LT
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
e175072f 149 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
3ff50b79 159 }
1da177e4 160
a5040efa 161 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 169 unsigned long opt, __u32 timeout)
1da177e4
LT
170{
171 int ret;
172
7c6a329e
MH
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
1da177e4
LT
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
f630cf0d 189 set_bit(HCI_RESET, &hdev->flags);
a9de9248 190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
191}
192
193static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194{
b0916ea0 195 struct hci_cp_delete_stored_link_key cp;
1da177e4 196 struct sk_buff *skb;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4
LT
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 207 skb->dev = (void *) hdev;
c78ae283 208
1da177e4 209 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 210 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
f630cf0d
GP
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 220 }
1da177e4
LT
221
222 /* Read Local Supported Features */
a9de9248 223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 224
1143e5a6 225 /* Read Local Version */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 227
1da177e4 228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4
LT
230
231#if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
aca3192c 235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
1da177e4 236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
aca3192c
YH
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
1da177e4
LT
240 }
241#endif
242
243 /* Read BD Address */
a9de9248
MH
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
251
252 /* Read Voice Setting */
a9de9248 253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
89f2783d 258 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 260
1da177e4 261 /* Connection accept timeout ~20 secs */
aca3192c 262 param = cpu_to_le16(0x7d00);
a9de9248 263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
268}
269
6ed58ec5
VT
270static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271{
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276}
277
1da177e4
LT
278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
a9de9248 285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
a9de9248 295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
e4e8e37c 304 /* Encryption */
a9de9248 305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
306}
307
e4e8e37c
MH
308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
a418b893 312 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
8e87d142 318/* Get HCI device by index.
1da177e4
LT
319 * Device is held on return. */
320struct hci_dev *hci_dev_get(int index)
321{
8035ded4 322 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
8035ded4 330 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338}
1da177e4
LT
339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
368void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 371 struct inquiry_entry *ie;
1da177e4
LT
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
70f23020
AE
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
1da177e4 377 /* Entry not in the cache. Add new one. */
70f23020
AE
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
1da177e4 380 return;
70f23020
AE
381
382 ie->next = cache->list;
383 cache->list = ie;
1da177e4
LT
384 }
385
70f23020
AE
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
1da177e4
LT
388 cache->timestamp = jiffies;
389}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
413static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414{
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
a9de9248 427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
428}
429
430int hci_inquiry(void __user *arg)
431{
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
5a08ecce
AE
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
1da177e4
LT
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
8e87d142 447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
04837f64 455 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
1da177e4
LT
462
463 /* for unlimited number of responses we will use buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467 * copy it to the user space.
468 */
01df8c31 469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 470 if (!buf) {
1da177e4
LT
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
8e87d142 486 } else
1da177e4
LT
487 err = -EFAULT;
488
489 kfree(buf);
490
491done:
492 hci_dev_put(hdev);
493 return err;
494}
495
496/* ---- HCI ioctl helpers ---- */
497
498int hci_dev_open(__u16 dev)
499{
500 struct hci_dev *hdev;
501 int ret = 0;
502
5a08ecce
AE
503 hdev = hci_dev_get(dev);
504 if (!hdev)
1da177e4
LT
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
611b30f7
MH
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
1da177e4
LT
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
07e3b94a
AE
524 /* Treat all non BR/EDR controllers as raw devices if
525 enable_hs is not set */
526 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
527 set_bit(HCI_RAW, &hdev->flags);
528
1da177e4
LT
529 if (hdev->open(hdev)) {
530 ret = -EIO;
531 goto done;
532 }
533
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
a5040efa 537 hdev->init_last_cmd = 0;
1da177e4 538
04837f64
MH
539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 541
eead27da 542 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
1da177e4
LT
546 clear_bit(HCI_INIT, &hdev->flags);
547 }
548
549 if (!ret) {
550 hci_dev_hold(hdev);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
56e5cb86
JH
553 if (!test_bit(HCI_SETUP, &hdev->flags)) {
554 hci_dev_lock_bh(hdev);
744cf19e 555 mgmt_powered(hdev, 1);
56e5cb86
JH
556 hci_dev_unlock_bh(hdev);
557 }
8e87d142 558 } else {
1da177e4
LT
559 /* Init failed, cleanup */
560 tasklet_kill(&hdev->rx_task);
561 tasklet_kill(&hdev->tx_task);
562 tasklet_kill(&hdev->cmd_task);
563
564 skb_queue_purge(&hdev->cmd_q);
565 skb_queue_purge(&hdev->rx_q);
566
567 if (hdev->flush)
568 hdev->flush(hdev);
569
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
573 }
574
575 hdev->close(hdev);
576 hdev->flags = 0;
577 }
578
579done:
580 hci_req_unlock(hdev);
581 hci_dev_put(hdev);
582 return ret;
583}
584
585static int hci_dev_do_close(struct hci_dev *hdev)
586{
587 BT_DBG("%s %p", hdev->name, hdev);
588
589 hci_req_cancel(hdev, ENODEV);
590 hci_req_lock(hdev);
591
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 593 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
16ab91ab 602 if (hdev->discov_timeout > 0) {
e0f9309f 603 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
604 hdev->discov_timeout = 0;
605 }
606
3243553f 607 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 608 cancel_delayed_work(&hdev->power_off);
3243553f 609
1da177e4
LT
610 hci_dev_lock_bh(hdev);
611 inquiry_cache_flush(hdev);
612 hci_conn_hash_flush(hdev);
613 hci_dev_unlock_bh(hdev);
614
615 hci_notify(hdev, HCI_DEV_DOWN);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 /* Reset device */
621 skb_queue_purge(&hdev->cmd_q);
622 atomic_set(&hdev->cmd_cnt, 1);
623 if (!test_bit(HCI_RAW, &hdev->flags)) {
624 set_bit(HCI_INIT, &hdev->flags);
04837f64 625 __hci_request(hdev, hci_reset_req, 0,
43611a7b 626 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
627 clear_bit(HCI_INIT, &hdev->flags);
628 }
629
630 /* Kill cmd task */
631 tasklet_kill(&hdev->cmd_task);
632
633 /* Drop queues */
634 skb_queue_purge(&hdev->rx_q);
635 skb_queue_purge(&hdev->cmd_q);
636 skb_queue_purge(&hdev->raw_q);
637
638 /* Drop last sent command */
639 if (hdev->sent_cmd) {
b79f44c1 640 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
641 kfree_skb(hdev->sent_cmd);
642 hdev->sent_cmd = NULL;
643 }
644
645 /* After this point our queues are empty
646 * and no tasks are scheduled. */
647 hdev->close(hdev);
648
56e5cb86 649 hci_dev_lock_bh(hdev);
744cf19e 650 mgmt_powered(hdev, 0);
56e5cb86 651 hci_dev_unlock_bh(hdev);
5add6af8 652
1da177e4
LT
653 /* Clear flags */
654 hdev->flags = 0;
655
656 hci_req_unlock(hdev);
657
658 hci_dev_put(hdev);
659 return 0;
660}
661
662int hci_dev_close(__u16 dev)
663{
664 struct hci_dev *hdev;
665 int err;
666
70f23020
AE
667 hdev = hci_dev_get(dev);
668 if (!hdev)
1da177e4
LT
669 return -ENODEV;
670 err = hci_dev_do_close(hdev);
671 hci_dev_put(hdev);
672 return err;
673}
674
675int hci_dev_reset(__u16 dev)
676{
677 struct hci_dev *hdev;
678 int ret = 0;
679
70f23020
AE
680 hdev = hci_dev_get(dev);
681 if (!hdev)
1da177e4
LT
682 return -ENODEV;
683
684 hci_req_lock(hdev);
685 tasklet_disable(&hdev->tx_task);
686
687 if (!test_bit(HCI_UP, &hdev->flags))
688 goto done;
689
690 /* Drop queues */
691 skb_queue_purge(&hdev->rx_q);
692 skb_queue_purge(&hdev->cmd_q);
693
694 hci_dev_lock_bh(hdev);
695 inquiry_cache_flush(hdev);
696 hci_conn_hash_flush(hdev);
697 hci_dev_unlock_bh(hdev);
698
699 if (hdev->flush)
700 hdev->flush(hdev);
701
8e87d142 702 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 703 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
704
705 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
706 ret = __hci_request(hdev, hci_reset_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
708
709done:
710 tasklet_enable(&hdev->tx_task);
711 hci_req_unlock(hdev);
712 hci_dev_put(hdev);
713 return ret;
714}
715
716int hci_dev_reset_stat(__u16 dev)
717{
718 struct hci_dev *hdev;
719 int ret = 0;
720
70f23020
AE
721 hdev = hci_dev_get(dev);
722 if (!hdev)
1da177e4
LT
723 return -ENODEV;
724
725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
726
727 hci_dev_put(hdev);
728
729 return ret;
730}
731
732int hci_dev_cmd(unsigned int cmd, void __user *arg)
733{
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
738 if (copy_from_user(&dr, arg, sizeof(dr)))
739 return -EFAULT;
740
70f23020
AE
741 hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
1da177e4
LT
743 return -ENODEV;
744
745 switch (cmd) {
746 case HCISETAUTH:
04837f64
MH
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
749 break;
750
751 case HCISETENCRYPT:
752 if (!lmp_encrypt_capable(hdev)) {
753 err = -EOPNOTSUPP;
754 break;
755 }
756
757 if (!test_bit(HCI_AUTH, &hdev->flags)) {
758 /* Auth must be enabled first */
04837f64
MH
759 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
761 if (err)
762 break;
763 }
764
04837f64
MH
765 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
767 break;
768
769 case HCISETSCAN:
04837f64
MH
770 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
771 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
772 break;
773
1da177e4 774 case HCISETLINKPOL:
e4e8e37c
MH
775 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
777 break;
778
779 case HCISETLINKMODE:
e4e8e37c
MH
780 hdev->link_mode = ((__u16) dr.dev_opt) &
781 (HCI_LM_MASTER | HCI_LM_ACCEPT);
782 break;
783
784 case HCISETPTYPE:
785 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
786 break;
787
788 case HCISETACLMTU:
e4e8e37c
MH
789 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
790 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
791 break;
792
793 case HCISETSCOMTU:
e4e8e37c
MH
794 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
795 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
796 break;
797
798 default:
799 err = -EINVAL;
800 break;
801 }
e4e8e37c 802
1da177e4
LT
803 hci_dev_put(hdev);
804 return err;
805}
806
807int hci_get_dev_list(void __user *arg)
808{
8035ded4 809 struct hci_dev *hdev;
1da177e4
LT
810 struct hci_dev_list_req *dl;
811 struct hci_dev_req *dr;
1da177e4
LT
812 int n = 0, size, err;
813 __u16 dev_num;
814
815 if (get_user(dev_num, (__u16 __user *) arg))
816 return -EFAULT;
817
818 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
819 return -EINVAL;
820
821 size = sizeof(*dl) + dev_num * sizeof(*dr);
822
70f23020
AE
823 dl = kzalloc(size, GFP_KERNEL);
824 if (!dl)
1da177e4
LT
825 return -ENOMEM;
826
827 dr = dl->dev_req;
828
829 read_lock_bh(&hci_dev_list_lock);
8035ded4 830 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 832 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
833
834 if (!test_bit(HCI_MGMT, &hdev->flags))
835 set_bit(HCI_PAIRABLE, &hdev->flags);
836
1da177e4
LT
837 (dr + n)->dev_id = hdev->id;
838 (dr + n)->dev_opt = hdev->flags;
c542a06c 839
1da177e4
LT
840 if (++n >= dev_num)
841 break;
842 }
843 read_unlock_bh(&hci_dev_list_lock);
844
845 dl->dev_num = n;
846 size = sizeof(*dl) + n * sizeof(*dr);
847
848 err = copy_to_user(arg, dl, size);
849 kfree(dl);
850
851 return err ? -EFAULT : 0;
852}
853
854int hci_get_dev_info(void __user *arg)
855{
856 struct hci_dev *hdev;
857 struct hci_dev_info di;
858 int err = 0;
859
860 if (copy_from_user(&di, arg, sizeof(di)))
861 return -EFAULT;
862
70f23020
AE
863 hdev = hci_dev_get(di.dev_id);
864 if (!hdev)
1da177e4
LT
865 return -ENODEV;
866
3243553f
JH
867 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
868 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 869
c542a06c
JH
870 if (!test_bit(HCI_MGMT, &hdev->flags))
871 set_bit(HCI_PAIRABLE, &hdev->flags);
872
1da177e4
LT
873 strcpy(di.name, hdev->name);
874 di.bdaddr = hdev->bdaddr;
943da25d 875 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
876 di.flags = hdev->flags;
877 di.pkt_type = hdev->pkt_type;
878 di.acl_mtu = hdev->acl_mtu;
879 di.acl_pkts = hdev->acl_pkts;
880 di.sco_mtu = hdev->sco_mtu;
881 di.sco_pkts = hdev->sco_pkts;
882 di.link_policy = hdev->link_policy;
883 di.link_mode = hdev->link_mode;
884
885 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
886 memcpy(&di.features, &hdev->features, sizeof(di.features));
887
888 if (copy_to_user(arg, &di, sizeof(di)))
889 err = -EFAULT;
890
891 hci_dev_put(hdev);
892
893 return err;
894}
895
896/* ---- Interface to HCI drivers ---- */
897
611b30f7
MH
898static int hci_rfkill_set_block(void *data, bool blocked)
899{
900 struct hci_dev *hdev = data;
901
902 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
903
904 if (!blocked)
905 return 0;
906
907 hci_dev_do_close(hdev);
908
909 return 0;
910}
911
912static const struct rfkill_ops hci_rfkill_ops = {
913 .set_block = hci_rfkill_set_block,
914};
915
1da177e4
LT
916/* Alloc HCI device */
917struct hci_dev *hci_alloc_dev(void)
918{
919 struct hci_dev *hdev;
920
25ea6db0 921 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
922 if (!hdev)
923 return NULL;
924
0ac7e700 925 hci_init_sysfs(hdev);
1da177e4
LT
926 skb_queue_head_init(&hdev->driver_init);
927
928 return hdev;
929}
930EXPORT_SYMBOL(hci_alloc_dev);
931
932/* Free HCI device */
933void hci_free_dev(struct hci_dev *hdev)
934{
935 skb_queue_purge(&hdev->driver_init);
936
a91f2e39
MH
937 /* will free via device release */
938 put_device(&hdev->dev);
1da177e4
LT
939}
940EXPORT_SYMBOL(hci_free_dev);
941
ab81cbf9
JH
942static void hci_power_on(struct work_struct *work)
943{
944 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
945
946 BT_DBG("%s", hdev->name);
947
948 if (hci_dev_open(hdev->id) < 0)
949 return;
950
951 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
3243553f
JH
952 queue_delayed_work(hdev->workqueue, &hdev->power_off,
953 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
954
955 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 956 mgmt_index_added(hdev);
ab81cbf9
JH
957}
958
959static void hci_power_off(struct work_struct *work)
960{
3243553f
JH
961 struct hci_dev *hdev = container_of(work, struct hci_dev,
962 power_off.work);
ab81cbf9
JH
963
964 BT_DBG("%s", hdev->name);
965
966 clear_bit(HCI_AUTO_OFF, &hdev->flags);
967
3243553f 968 hci_dev_close(hdev->id);
ab81cbf9
JH
969}
970
16ab91ab
JH
971static void hci_discov_off(struct work_struct *work)
972{
973 struct hci_dev *hdev;
974 u8 scan = SCAN_PAGE;
975
976 hdev = container_of(work, struct hci_dev, discov_off.work);
977
978 BT_DBG("%s", hdev->name);
979
980 hci_dev_lock_bh(hdev);
981
982 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
983
984 hdev->discov_timeout = 0;
985
986 hci_dev_unlock_bh(hdev);
987}
988
2aeb9a1a
JH
989int hci_uuids_clear(struct hci_dev *hdev)
990{
991 struct list_head *p, *n;
992
993 list_for_each_safe(p, n, &hdev->uuids) {
994 struct bt_uuid *uuid;
995
996 uuid = list_entry(p, struct bt_uuid, list);
997
998 list_del(p);
999 kfree(uuid);
1000 }
1001
1002 return 0;
1003}
1004
55ed8ca1
JH
1005int hci_link_keys_clear(struct hci_dev *hdev)
1006{
1007 struct list_head *p, *n;
1008
1009 list_for_each_safe(p, n, &hdev->link_keys) {
1010 struct link_key *key;
1011
1012 key = list_entry(p, struct link_key, list);
1013
1014 list_del(p);
1015 kfree(key);
1016 }
1017
1018 return 0;
1019}
1020
1021struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1022{
8035ded4 1023 struct link_key *k;
55ed8ca1 1024
8035ded4 1025 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1026 if (bacmp(bdaddr, &k->bdaddr) == 0)
1027 return k;
55ed8ca1
JH
1028
1029 return NULL;
1030}
1031
d25e28ab
JH
1032static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1033 u8 key_type, u8 old_key_type)
1034{
1035 /* Legacy key */
1036 if (key_type < 0x03)
1037 return 1;
1038
1039 /* Debug keys are insecure so don't store them persistently */
1040 if (key_type == HCI_LK_DEBUG_COMBINATION)
1041 return 0;
1042
1043 /* Changed combination key and there's no previous one */
1044 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1045 return 0;
1046
1047 /* Security mode 3 case */
1048 if (!conn)
1049 return 1;
1050
1051 /* Neither local nor remote side had no-bonding as requirement */
1052 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1053 return 1;
1054
1055 /* Local side had dedicated bonding as requirement */
1056 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1057 return 1;
1058
1059 /* Remote side had dedicated bonding as requirement */
1060 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1061 return 1;
1062
1063 /* If none of the above criteria match, then don't store the key
1064 * persistently */
1065 return 0;
1066}
1067
75d262c2
VCG
1068struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1069{
1070 struct link_key *k;
1071
1072 list_for_each_entry(k, &hdev->link_keys, list) {
1073 struct key_master_id *id;
1074
1075 if (k->type != HCI_LK_SMP_LTK)
1076 continue;
1077
1078 if (k->dlen != sizeof(*id))
1079 continue;
1080
1081 id = (void *) &k->data;
1082 if (id->ediv == ediv &&
1083 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1084 return k;
1085 }
1086
1087 return NULL;
1088}
1089EXPORT_SYMBOL(hci_find_ltk);
1090
1091struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1092 bdaddr_t *bdaddr, u8 type)
1093{
1094 struct link_key *k;
1095
1096 list_for_each_entry(k, &hdev->link_keys, list)
1097 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1098 return k;
1099
1100 return NULL;
1101}
1102EXPORT_SYMBOL(hci_find_link_key_type);
1103
d25e28ab
JH
1104int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1105 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1106{
1107 struct link_key *key, *old_key;
4df378a1 1108 u8 old_key_type, persistent;
55ed8ca1
JH
1109
1110 old_key = hci_find_link_key(hdev, bdaddr);
1111 if (old_key) {
1112 old_key_type = old_key->type;
1113 key = old_key;
1114 } else {
12adcf3a 1115 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1116 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1117 if (!key)
1118 return -ENOMEM;
1119 list_add(&key->list, &hdev->link_keys);
1120 }
1121
1122 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1123
d25e28ab
JH
1124 /* Some buggy controller combinations generate a changed
1125 * combination key for legacy pairing even when there's no
1126 * previous key */
1127 if (type == HCI_LK_CHANGED_COMBINATION &&
1128 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1129 old_key_type == 0xff) {
d25e28ab 1130 type = HCI_LK_COMBINATION;
655fe6ec
JH
1131 if (conn)
1132 conn->key_type = type;
1133 }
d25e28ab 1134
55ed8ca1
JH
1135 bacpy(&key->bdaddr, bdaddr);
1136 memcpy(key->val, val, 16);
55ed8ca1
JH
1137 key->pin_len = pin_len;
1138
b6020ba0 1139 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1140 key->type = old_key_type;
4748fed2
JH
1141 else
1142 key->type = type;
1143
4df378a1
JH
1144 if (!new_key)
1145 return 0;
1146
1147 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1148
744cf19e 1149 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1150
1151 if (!persistent) {
1152 list_del(&key->list);
1153 kfree(key);
1154 }
55ed8ca1
JH
1155
1156 return 0;
1157}
1158
/* Store an SMP Long Term Key for @bdaddr, reusing an existing LTK entry
 * if one is present.
 *
 * @new_key:  non-zero when userspace should be notified via mgmt
 * @key_size: encryption key size, stored in the pin_len field
 * @ediv/@rand: encrypted diversifier and random value kept alongside the
 *              key in a trailing struct key_master_id
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate key plus trailing master-id blob in one go */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): old_key_type is passed as the third argument here,
	 * whereas hci_add_link_key() passes a persistence flag in the same
	 * position — confirm against mgmt_new_link_key()'s contract. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1196
55ed8ca1
JH
/* Delete the stored link key for @bdaddr.
 * Returns 0 on success or -ENOENT when no key is stored for the address.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1212
6bd32326
VT
/* HCI command timer function.
 * Fires when the controller failed to answer the last HCI command in
 * time; force the command credit back to 1 and reschedule the command
 * tasklet so the queue does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1222
2763eda6
SJ
1223struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1224 bdaddr_t *bdaddr)
1225{
1226 struct oob_data *data;
1227
1228 list_for_each_entry(data, &hdev->remote_oob_data, list)
1229 if (bacmp(bdaddr, &data->bdaddr) == 0)
1230 return data;
1231
1232 return NULL;
1233}
1234
1235int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1236{
1237 struct oob_data *data;
1238
1239 data = hci_find_remote_oob_data(hdev, bdaddr);
1240 if (!data)
1241 return -ENOENT;
1242
1243 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1244
1245 list_del(&data->list);
1246 kfree(data);
1247
1248 return 0;
1249}
1250
/* Free every cached remote OOB data entry for this device. Returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	/* _safe variant: entries are freed while walking the list */
	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1262
/* Cache remote OOB pairing data (hash + randomizer) for @bdaddr,
 * overwriting any existing entry for the same address.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1286
b2a66aad
AJ
1287struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1288 bdaddr_t *bdaddr)
1289{
8035ded4 1290 struct bdaddr_list *b;
b2a66aad 1291
8035ded4 1292 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1293 if (bacmp(bdaddr, &b->bdaddr) == 0)
1294 return b;
b2a66aad
AJ
1295
1296 return NULL;
1297}
1298
1299int hci_blacklist_clear(struct hci_dev *hdev)
1300{
1301 struct list_head *p, *n;
1302
1303 list_for_each_safe(p, n, &hdev->blacklist) {
1304 struct bdaddr_list *b;
1305
1306 b = list_entry(p, struct bdaddr_list, list);
1307
1308 list_del(p);
1309 kfree(b);
1310 }
1311
1312 return 0;
1313}
1314
/* Add @bdaddr to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt_device_blocked()
 * result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blocked */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1335
/* Remove @bdaddr from the blacklist and notify mgmt.
 * BDADDR_ANY clears the whole blacklist. Returns -ENOENT when the
 * address is not blacklisted, otherwise the mgmt_device_unblocked()
 * result (or hci_blacklist_clear()'s result for the wildcard case).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1352
35815085
AG
/* Timer callback (hdev->adv_timer): flush the LE advertising cache
 * under the device lock once the cache lifetime expires.
 */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1363
76c8686f
AG
/* Free every cached LE advertising entry. Returns 0.
 * Caller is expected to hold the device lock (see hci_clear_adv_cache).
 */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1377
1378struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1379{
1380 struct adv_entry *entry;
1381
1382 list_for_each_entry(entry, &hdev->adv_entries, list)
1383 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1384 return entry;
1385
1386 return NULL;
1387}
1388
1389static inline int is_connectable_adv(u8 evt_type)
1390{
1391 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1392 return 1;
1393
1394 return 0;
1395}
1396
/* Cache the address from a connectable LE advertising report.
 * Returns -EINVAL for non-connectable report types, 0 when the address
 * is already cached or was added, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1424
1da177e4
LT
/* Register HCI device.
 * Assigns the first free device id (AMP controllers start at 1 so that
 * index 0 implies BR/EDR), initializes all per-device state, creates the
 * workqueue/sysfs/rfkill entries and schedules the initial power-on.
 * Returns the assigned id, or a negative errno; on failure the device is
 * unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Driver must supply the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal; continue without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion performed above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1550
/* Unregister HCI device.
 * Unlinks the device, closes it, tears down mgmt/sysfs/rfkill state,
 * frees all cached keys/blacklist/OOB/advertising data and finally drops
 * the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1602
/* Suspend HCI device: notify registered listeners only. Returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1610
/* Resume HCI device: notify registered listeners only. Returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1618
76bca880
MH
/* Receive frame from HCI drivers.
 * Consumes @skb in all cases. Frames arriving while the device is
 * neither up nor initializing are dropped with -ENXIO; otherwise the
 * frame is timestamped, queued and handed to the rx tasklet.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1642
/* Incrementally reassemble one HCI packet of @type from a driver byte
 * stream into hdev->reassembly[@index].
 *
 * State lives in the skb's control block: scb->expect counts the bytes
 * still needed. Once the header is complete, expect is extended to the
 * payload length from the header. A completed frame is handed to
 * hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes, -EILSEQ for a bad
 * type/index, or -ENOMEM on allocation failure or oversized payload.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame size
		 * for the packet type and expect just the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the payload length and
		 * reject payloads that exceed the allocated tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1751
ef222013
MH
/* Feed a driver fragment of a typed HCI packet into the reassembly
 * machinery, looping until all input is consumed. Per-type reassembly
 * slot is (type - 1). Returns the last hci_reassembly() result:
 * leftover byte count (>= 0) or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1771
99811510
SS
/* Reassembly slot used for untyped byte streams (e.g. H4 UARTs) */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream to the reassembler. The first byte of each
 * packet carries the HCI packet type; subsequent fragments reuse the
 * type remembered in the in-progress skb. Returns leftover byte count
 * or a negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1806
1da177e4
LT
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
/* Register an upper-layer protocol in its hci_proto slot.
 * Returns 0, -EINVAL for a bad id, or -EEXIST if the slot is taken.
 */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1832
/* Unregister an upper-layer protocol.
 * Returns 0, -EINVAL for a bad id, or -ENOENT if the slot was empty.
 */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1854
/* Add an HCI callback structure to the global callback list. Returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1866
/* Remove an HCI callback structure from the global list. Returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1878
/* Hand one outgoing frame to the driver's send callback.
 * If any socket is in promiscuous mode a timestamped copy is delivered
 * to the monitor sockets first. Returns the driver's result, or -ENODEV
 * when the skb carries no device (the skb is freed in that case).
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1902
/* Send HCI command */
/* Build an HCI command packet for @opcode with @plen bytes of @param and
 * queue it on cmd_q for the command tasklet. During HCI_INIT the opcode
 * is remembered in init_last_cmd. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1da177e4
LT
1938
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command, or
 * NULL if no command is outstanding or its opcode differs from @opcode.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1956
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1969
73d80deb
LAD
/* Queue an (optionally fragmented) ACL skb on @queue.
 * A fragmented skb's frag_list is flattened: every fragment gets its own
 * ACL header with ACL_CONT replacing ACL_START, and all fragments are
 * enqueued atomically under the queue lock to keep them contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2010
/* Send ACL data on @chan: add the ACL header for the first fragment,
 * queue the skb (and any fragments) on the channel's data queue and kick
 * the tx tasklet.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2027
/* Send SCO data */
/* Prepend the SCO header and queue the frame for the tx tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2050
2051/* ---- HCI TX task (outgoing data) ---- */
2052
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data that has the fewest
 * in-flight packets (fairness), and compute its per-round packet quota
 * from the available controller buffer credits divided by the number of
 * eligible connections (minimum 1). *quote is 0 when nothing is ready.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE shares
			 * the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2107
/* Handle a transmit timeout on links of @type: disconnect every
 * connection of that type that still has unacknowledged packets
 * (reason 0x13, remote user terminated connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
2124
73d80deb
LAD
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one carrying the highest skb priority seen; ties are broken by
 * the owning connection with the fewest packets in flight. The quota is
 * the available credits divided by the number of candidates at that
 * priority (minimum 1). Returns NULL when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2202
02b20f0b
LAD
/* Anti-starvation pass after a tx round: for every channel of @type that
 * sent nothing this round yet has data queued, promote its head skb to
 * HCI_PRIO_MAX - 1 so it competes with high-priority traffic next round.
 * Channels that did send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2249
73d80deb
LAD
/* Schedule ACL transmissions: detect stalled links, then repeatedly pick
 * the best channel via hci_chan_sent() and drain up to its quota of
 * packets (stopping early if a lower-priority skb surfaces). Finishes
 * with a priority recalculation when anything was sent.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2299
/* Schedule SCO */
/* Drain queued SCO frames fairly across SCO connections, bounded by the
 * per-connection quota from hci_low_sent(). The sent counter wraps back
 * to 0 at ~0 since SCO has no flow-control acknowledgements.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2323
b6a0dc82
MH
/* Schedule eSCO transmissions; identical strategy to hci_sched_sco()
 * but iterating ESCO_LINK connections.
 */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2346
6ed58ec5
VT
/* Schedule LE transmissions: mirror of hci_sched_acl() for LE links.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) share
 * the ACL credits, so the remaining count is written back to whichever
 * pool was drawn from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2397
1da177e4
LT
/* TX tasklet: run every per-link-type scheduler, then flush raw packets.
 * 'arg' is the hci_dev pointer cast through the tasklet's unsigned long. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	/* Shared read lock with hci_rx_task; see hci_task_lock users */
	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
			hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2424
25985edc 2425/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2426
/* ACL data packet: strip the ACL header, look up the connection by handle
 * and hand the skb to the L2CAP layer.  On handoff the callee owns the skb;
 * otherwise it is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the connection handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup under the dev lock; conn is used after unlock — assumes the
	 * connection cannot vanish while this RX path runs (tasklet context) */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;		/* skb consumed by L2CAP */
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
				hdev->name, handle);
	}

	/* No consumer took ownership — drop the frame */
	kfree_skb(skb);
}
2466
/* SCO data packet: strip the SCO header, resolve the connection handle and
 * deliver the skb to the SCO protocol layer; freed here if undeliverable. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	/* Lookup under the dev lock; see matching pattern in the ACL path */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;		/* skb consumed by SCO layer */
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
				hdev->name, handle);
	}

	/* No consumer took ownership — drop the frame */
	kfree_skb(skb);
}
2502
6516455d 2503static void hci_rx_task(unsigned long arg)
1da177e4
LT
2504{
2505 struct hci_dev *hdev = (struct hci_dev *) arg;
2506 struct sk_buff *skb;
2507
2508 BT_DBG("%s", hdev->name);
2509
2510 read_lock(&hci_task_lock);
2511
2512 while ((skb = skb_dequeue(&hdev->rx_q))) {
2513 if (atomic_read(&hdev->promisc)) {
2514 /* Send copy to the sockets */
eec8d2bc 2515 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2516 }
2517
2518 if (test_bit(HCI_RAW, &hdev->flags)) {
2519 kfree_skb(skb);
2520 continue;
2521 }
2522
2523 if (test_bit(HCI_INIT, &hdev->flags)) {
2524 /* Don't process data packets in this states. */
0d48d939 2525 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2526 case HCI_ACLDATA_PKT:
2527 case HCI_SCODATA_PKT:
2528 kfree_skb(skb);
2529 continue;
3ff50b79 2530 }
1da177e4
LT
2531 }
2532
2533 /* Process frame */
0d48d939 2534 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2535 case HCI_EVENT_PKT:
2536 hci_event_packet(hdev, skb);
2537 break;
2538
2539 case HCI_ACLDATA_PKT:
2540 BT_DBG("%s ACL data packet", hdev->name);
2541 hci_acldata_packet(hdev, skb);
2542 break;
2543
2544 case HCI_SCODATA_PKT:
2545 BT_DBG("%s SCO data packet", hdev->name);
2546 hci_scodata_packet(hdev, skb);
2547 break;
2548
2549 default:
2550 kfree_skb(skb);
2551 break;
2552 }
2553 }
2554
2555 read_unlock(&hci_task_lock);
2556}
2557
/* CMD tasklet: send the next queued HCI command when the controller has
 * command credits (cmd_cnt), keeping a clone in sent_cmd so the completion
 * handler can match the reply, and arming a timeout for the response. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Clone BEFORE sending: hci_send_frame consumes the skb */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI_RESET no reply is expected — kill the
			 * command timeout instead of re-arming it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue at the head and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2519a1fc
AG
2588
2589int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2590{
2591 /* General inquiry access code (GIAC) */
2592 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2593 struct hci_cp_inquiry cp;
2594
2595 BT_DBG("%s", hdev->name);
2596
2597 if (test_bit(HCI_INQUIRY, &hdev->flags))
2598 return -EINPROGRESS;
2599
2600 memset(&cp, 0, sizeof(cp));
2601 memcpy(&cp.lap, lap, sizeof(cp.lap));
2602 cp.length = length;
2603
2604 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2605}
023d5049
AG
2606
2607int hci_cancel_inquiry(struct hci_dev *hdev)
2608{
2609 BT_DBG("%s", hdev->name);
2610
2611 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2612 return -EPERM;
2613
2614 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2615}