Bluetooth: Fix mgmt response when HCI_Write_Scan_Enable fails
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an auto-powered-on adapter is powered back off
 * unless userspace (mgmt) takes ownership of it. */
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
e041c683 82 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
83}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
e041c683 87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
88}
89
6516455d 90static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 91{
e041c683 92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
93}
94
95/* ---- HCI requests ---- */
96
23bb5763 97void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 98{
23bb5763
JH
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
a5040efa
JH
101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 105 return;
1da177e4
LT
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
8e87d142 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 127 unsigned long opt, __u32 timeout)
1da177e4
LT
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
e175072f 149 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
3ff50b79 159 }
1da177e4 160
a5040efa 161 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 169 unsigned long opt, __u32 timeout)
1da177e4
LT
170{
171 int ret;
172
7c6a329e
MH
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
1da177e4
LT
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
f630cf0d 189 set_bit(HCI_RESET, &hdev->flags);
a9de9248 190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
191}
192
193static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194{
b0916ea0 195 struct hci_cp_delete_stored_link_key cp;
1da177e4 196 struct sk_buff *skb;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4
LT
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 207 skb->dev = (void *) hdev;
c78ae283 208
1da177e4 209 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 210 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
f630cf0d
GP
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 220 }
1da177e4
LT
221
222 /* Read Local Supported Features */
a9de9248 223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 224
1143e5a6 225 /* Read Local Version */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 227
1da177e4 228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4
LT
230
231#if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
aca3192c 235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
1da177e4 236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
aca3192c
YH
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
1da177e4
LT
240 }
241#endif
242
243 /* Read BD Address */
a9de9248
MH
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
251
252 /* Read Voice Setting */
a9de9248 253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
89f2783d 258 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 260
1da177e4 261 /* Connection accept timeout ~20 secs */
aca3192c 262 param = cpu_to_le16(0x7d00);
a9de9248 263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
268}
269
6ed58ec5
VT
270static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271{
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276}
277
1da177e4
LT
278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
a9de9248 285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
a9de9248 295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
e4e8e37c 304 /* Encryption */
a9de9248 305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
306}
307
e4e8e37c
MH
308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
a418b893 312 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
8e87d142 318/* Get HCI device by index.
1da177e4
LT
319 * Device is held on return. */
320struct hci_dev *hci_dev_get(int index)
321{
8035ded4 322 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
8035ded4 330 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338}
1da177e4
LT
339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
368void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 371 struct inquiry_entry *ie;
1da177e4
LT
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
70f23020
AE
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
1da177e4 377 /* Entry not in the cache. Add new one. */
70f23020
AE
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
1da177e4 380 return;
70f23020
AE
381
382 ie->next = cache->list;
383 cache->list = ie;
1da177e4
LT
384 }
385
70f23020
AE
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
1da177e4
LT
388 cache->timestamp = jiffies;
389}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
413static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414{
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
a9de9248 427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
428}
429
430int hci_inquiry(void __user *arg)
431{
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
5a08ecce
AE
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
1da177e4
LT
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
8e87d142 447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
04837f64 455 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
1da177e4
LT
462
463 /* for unlimited number of responses we will use buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467 * copy it to the user space.
468 */
01df8c31 469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 470 if (!buf) {
1da177e4
LT
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
8e87d142 486 } else
1da177e4
LT
487 err = -EFAULT;
488
489 kfree(buf);
490
491done:
492 hci_dev_put(hdev);
493 return err;
494}
495
496/* ---- HCI ioctl helpers ---- */
497
498int hci_dev_open(__u16 dev)
499{
500 struct hci_dev *hdev;
501 int ret = 0;
502
5a08ecce
AE
503 hdev = hci_dev_get(dev);
504 if (!hdev)
1da177e4
LT
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
611b30f7
MH
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
1da177e4
LT
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
943da25d
MH
524 /* Treat all non BR/EDR controllers as raw devices for now */
525 if (hdev->dev_type != HCI_BREDR)
526 set_bit(HCI_RAW, &hdev->flags);
527
1da177e4
LT
528 if (hdev->open(hdev)) {
529 ret = -EIO;
530 goto done;
531 }
532
533 if (!test_bit(HCI_RAW, &hdev->flags)) {
534 atomic_set(&hdev->cmd_cnt, 1);
535 set_bit(HCI_INIT, &hdev->flags);
a5040efa 536 hdev->init_last_cmd = 0;
1da177e4 537
04837f64
MH
538 ret = __hci_request(hdev, hci_init_req, 0,
539 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 540
eead27da 541 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
542 ret = __hci_request(hdev, hci_le_init_req, 0,
543 msecs_to_jiffies(HCI_INIT_TIMEOUT));
544
1da177e4
LT
545 clear_bit(HCI_INIT, &hdev->flags);
546 }
547
548 if (!ret) {
549 hci_dev_hold(hdev);
550 set_bit(HCI_UP, &hdev->flags);
551 hci_notify(hdev, HCI_DEV_UP);
5add6af8
JH
552 if (!test_bit(HCI_SETUP, &hdev->flags))
553 mgmt_powered(hdev->id, 1);
8e87d142 554 } else {
1da177e4
LT
555 /* Init failed, cleanup */
556 tasklet_kill(&hdev->rx_task);
557 tasklet_kill(&hdev->tx_task);
558 tasklet_kill(&hdev->cmd_task);
559
560 skb_queue_purge(&hdev->cmd_q);
561 skb_queue_purge(&hdev->rx_q);
562
563 if (hdev->flush)
564 hdev->flush(hdev);
565
566 if (hdev->sent_cmd) {
567 kfree_skb(hdev->sent_cmd);
568 hdev->sent_cmd = NULL;
569 }
570
571 hdev->close(hdev);
572 hdev->flags = 0;
573 }
574
575done:
576 hci_req_unlock(hdev);
577 hci_dev_put(hdev);
578 return ret;
579}
580
581static int hci_dev_do_close(struct hci_dev *hdev)
582{
583 BT_DBG("%s %p", hdev->name, hdev);
584
585 hci_req_cancel(hdev, ENODEV);
586 hci_req_lock(hdev);
587
588 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 589 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
590 hci_req_unlock(hdev);
591 return 0;
592 }
593
594 /* Kill RX and TX tasks */
595 tasklet_kill(&hdev->rx_task);
596 tasklet_kill(&hdev->tx_task);
597
16ab91ab
JH
598 if (hdev->discov_timeout > 0) {
599 cancel_delayed_work_sync(&hdev->discov_off);
600 hdev->discov_timeout = 0;
601 }
602
1da177e4
LT
603 hci_dev_lock_bh(hdev);
604 inquiry_cache_flush(hdev);
605 hci_conn_hash_flush(hdev);
606 hci_dev_unlock_bh(hdev);
607
608 hci_notify(hdev, HCI_DEV_DOWN);
609
610 if (hdev->flush)
611 hdev->flush(hdev);
612
613 /* Reset device */
614 skb_queue_purge(&hdev->cmd_q);
615 atomic_set(&hdev->cmd_cnt, 1);
616 if (!test_bit(HCI_RAW, &hdev->flags)) {
617 set_bit(HCI_INIT, &hdev->flags);
04837f64 618 __hci_request(hdev, hci_reset_req, 0,
43611a7b 619 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
620 clear_bit(HCI_INIT, &hdev->flags);
621 }
622
623 /* Kill cmd task */
624 tasklet_kill(&hdev->cmd_task);
625
626 /* Drop queues */
627 skb_queue_purge(&hdev->rx_q);
628 skb_queue_purge(&hdev->cmd_q);
629 skb_queue_purge(&hdev->raw_q);
630
631 /* Drop last sent command */
632 if (hdev->sent_cmd) {
b79f44c1 633 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
634 kfree_skb(hdev->sent_cmd);
635 hdev->sent_cmd = NULL;
636 }
637
638 /* After this point our queues are empty
639 * and no tasks are scheduled. */
640 hdev->close(hdev);
641
5add6af8
JH
642 mgmt_powered(hdev->id, 0);
643
1da177e4
LT
644 /* Clear flags */
645 hdev->flags = 0;
646
647 hci_req_unlock(hdev);
648
649 hci_dev_put(hdev);
650 return 0;
651}
652
653int hci_dev_close(__u16 dev)
654{
655 struct hci_dev *hdev;
656 int err;
657
70f23020
AE
658 hdev = hci_dev_get(dev);
659 if (!hdev)
1da177e4
LT
660 return -ENODEV;
661 err = hci_dev_do_close(hdev);
662 hci_dev_put(hdev);
663 return err;
664}
665
666int hci_dev_reset(__u16 dev)
667{
668 struct hci_dev *hdev;
669 int ret = 0;
670
70f23020
AE
671 hdev = hci_dev_get(dev);
672 if (!hdev)
1da177e4
LT
673 return -ENODEV;
674
675 hci_req_lock(hdev);
676 tasklet_disable(&hdev->tx_task);
677
678 if (!test_bit(HCI_UP, &hdev->flags))
679 goto done;
680
681 /* Drop queues */
682 skb_queue_purge(&hdev->rx_q);
683 skb_queue_purge(&hdev->cmd_q);
684
685 hci_dev_lock_bh(hdev);
686 inquiry_cache_flush(hdev);
687 hci_conn_hash_flush(hdev);
688 hci_dev_unlock_bh(hdev);
689
690 if (hdev->flush)
691 hdev->flush(hdev);
692
8e87d142 693 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 694 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
695
696 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
697 ret = __hci_request(hdev, hci_reset_req, 0,
698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
699
700done:
701 tasklet_enable(&hdev->tx_task);
702 hci_req_unlock(hdev);
703 hci_dev_put(hdev);
704 return ret;
705}
706
707int hci_dev_reset_stat(__u16 dev)
708{
709 struct hci_dev *hdev;
710 int ret = 0;
711
70f23020
AE
712 hdev = hci_dev_get(dev);
713 if (!hdev)
1da177e4
LT
714 return -ENODEV;
715
716 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
717
718 hci_dev_put(hdev);
719
720 return ret;
721}
722
723int hci_dev_cmd(unsigned int cmd, void __user *arg)
724{
725 struct hci_dev *hdev;
726 struct hci_dev_req dr;
727 int err = 0;
728
729 if (copy_from_user(&dr, arg, sizeof(dr)))
730 return -EFAULT;
731
70f23020
AE
732 hdev = hci_dev_get(dr.dev_id);
733 if (!hdev)
1da177e4
LT
734 return -ENODEV;
735
736 switch (cmd) {
737 case HCISETAUTH:
04837f64
MH
738 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
739 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
740 break;
741
742 case HCISETENCRYPT:
743 if (!lmp_encrypt_capable(hdev)) {
744 err = -EOPNOTSUPP;
745 break;
746 }
747
748 if (!test_bit(HCI_AUTH, &hdev->flags)) {
749 /* Auth must be enabled first */
04837f64
MH
750 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
751 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
752 if (err)
753 break;
754 }
755
04837f64
MH
756 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
757 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
758 break;
759
760 case HCISETSCAN:
04837f64
MH
761 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
762 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
763 break;
764
1da177e4 765 case HCISETLINKPOL:
e4e8e37c
MH
766 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
767 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
768 break;
769
770 case HCISETLINKMODE:
e4e8e37c
MH
771 hdev->link_mode = ((__u16) dr.dev_opt) &
772 (HCI_LM_MASTER | HCI_LM_ACCEPT);
773 break;
774
775 case HCISETPTYPE:
776 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
777 break;
778
779 case HCISETACLMTU:
e4e8e37c
MH
780 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
781 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
782 break;
783
784 case HCISETSCOMTU:
e4e8e37c
MH
785 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
786 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
787 break;
788
789 default:
790 err = -EINVAL;
791 break;
792 }
e4e8e37c 793
1da177e4
LT
794 hci_dev_put(hdev);
795 return err;
796}
797
798int hci_get_dev_list(void __user *arg)
799{
8035ded4 800 struct hci_dev *hdev;
1da177e4
LT
801 struct hci_dev_list_req *dl;
802 struct hci_dev_req *dr;
1da177e4
LT
803 int n = 0, size, err;
804 __u16 dev_num;
805
806 if (get_user(dev_num, (__u16 __user *) arg))
807 return -EFAULT;
808
809 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
810 return -EINVAL;
811
812 size = sizeof(*dl) + dev_num * sizeof(*dr);
813
70f23020
AE
814 dl = kzalloc(size, GFP_KERNEL);
815 if (!dl)
1da177e4
LT
816 return -ENOMEM;
817
818 dr = dl->dev_req;
819
820 read_lock_bh(&hci_dev_list_lock);
8035ded4 821 list_for_each_entry(hdev, &hci_dev_list, list) {
ab81cbf9 822 hci_del_off_timer(hdev);
c542a06c
JH
823
824 if (!test_bit(HCI_MGMT, &hdev->flags))
825 set_bit(HCI_PAIRABLE, &hdev->flags);
826
1da177e4
LT
827 (dr + n)->dev_id = hdev->id;
828 (dr + n)->dev_opt = hdev->flags;
c542a06c 829
1da177e4
LT
830 if (++n >= dev_num)
831 break;
832 }
833 read_unlock_bh(&hci_dev_list_lock);
834
835 dl->dev_num = n;
836 size = sizeof(*dl) + n * sizeof(*dr);
837
838 err = copy_to_user(arg, dl, size);
839 kfree(dl);
840
841 return err ? -EFAULT : 0;
842}
843
844int hci_get_dev_info(void __user *arg)
845{
846 struct hci_dev *hdev;
847 struct hci_dev_info di;
848 int err = 0;
849
850 if (copy_from_user(&di, arg, sizeof(di)))
851 return -EFAULT;
852
70f23020
AE
853 hdev = hci_dev_get(di.dev_id);
854 if (!hdev)
1da177e4
LT
855 return -ENODEV;
856
ab81cbf9
JH
857 hci_del_off_timer(hdev);
858
c542a06c
JH
859 if (!test_bit(HCI_MGMT, &hdev->flags))
860 set_bit(HCI_PAIRABLE, &hdev->flags);
861
1da177e4
LT
862 strcpy(di.name, hdev->name);
863 di.bdaddr = hdev->bdaddr;
943da25d 864 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
865 di.flags = hdev->flags;
866 di.pkt_type = hdev->pkt_type;
867 di.acl_mtu = hdev->acl_mtu;
868 di.acl_pkts = hdev->acl_pkts;
869 di.sco_mtu = hdev->sco_mtu;
870 di.sco_pkts = hdev->sco_pkts;
871 di.link_policy = hdev->link_policy;
872 di.link_mode = hdev->link_mode;
873
874 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
875 memcpy(&di.features, &hdev->features, sizeof(di.features));
876
877 if (copy_to_user(arg, &di, sizeof(di)))
878 err = -EFAULT;
879
880 hci_dev_put(hdev);
881
882 return err;
883}
884
885/* ---- Interface to HCI drivers ---- */
886
611b30f7
MH
887static int hci_rfkill_set_block(void *data, bool blocked)
888{
889 struct hci_dev *hdev = data;
890
891 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
892
893 if (!blocked)
894 return 0;
895
896 hci_dev_do_close(hdev);
897
898 return 0;
899}
900
901static const struct rfkill_ops hci_rfkill_ops = {
902 .set_block = hci_rfkill_set_block,
903};
904
1da177e4
LT
905/* Alloc HCI device */
906struct hci_dev *hci_alloc_dev(void)
907{
908 struct hci_dev *hdev;
909
25ea6db0 910 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
911 if (!hdev)
912 return NULL;
913
0ac7e700 914 hci_init_sysfs(hdev);
1da177e4
LT
915 skb_queue_head_init(&hdev->driver_init);
916
917 return hdev;
918}
919EXPORT_SYMBOL(hci_alloc_dev);
920
921/* Free HCI device */
922void hci_free_dev(struct hci_dev *hdev)
923{
924 skb_queue_purge(&hdev->driver_init);
925
a91f2e39
MH
926 /* will free via device release */
927 put_device(&hdev->dev);
1da177e4
LT
928}
929EXPORT_SYMBOL(hci_free_dev);
930
ab81cbf9
JH
931static void hci_power_on(struct work_struct *work)
932{
933 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
934
935 BT_DBG("%s", hdev->name);
936
937 if (hci_dev_open(hdev->id) < 0)
938 return;
939
940 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
941 mod_timer(&hdev->off_timer,
942 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
943
944 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
945 mgmt_index_added(hdev->id);
946}
947
948static void hci_power_off(struct work_struct *work)
949{
950 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
951
952 BT_DBG("%s", hdev->name);
953
954 hci_dev_close(hdev->id);
955}
956
957static void hci_auto_off(unsigned long data)
958{
959 struct hci_dev *hdev = (struct hci_dev *) data;
960
961 BT_DBG("%s", hdev->name);
962
963 clear_bit(HCI_AUTO_OFF, &hdev->flags);
964
965 queue_work(hdev->workqueue, &hdev->power_off);
966}
967
968void hci_del_off_timer(struct hci_dev *hdev)
969{
970 BT_DBG("%s", hdev->name);
971
972 clear_bit(HCI_AUTO_OFF, &hdev->flags);
973 del_timer(&hdev->off_timer);
974}
975
16ab91ab
JH
976static void hci_discov_off(struct work_struct *work)
977{
978 struct hci_dev *hdev;
979 u8 scan = SCAN_PAGE;
980
981 hdev = container_of(work, struct hci_dev, discov_off.work);
982
983 BT_DBG("%s", hdev->name);
984
985 hci_dev_lock_bh(hdev);
986
987 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
988
989 hdev->discov_timeout = 0;
990
991 hci_dev_unlock_bh(hdev);
992}
993
2aeb9a1a
JH
994int hci_uuids_clear(struct hci_dev *hdev)
995{
996 struct list_head *p, *n;
997
998 list_for_each_safe(p, n, &hdev->uuids) {
999 struct bt_uuid *uuid;
1000
1001 uuid = list_entry(p, struct bt_uuid, list);
1002
1003 list_del(p);
1004 kfree(uuid);
1005 }
1006
1007 return 0;
1008}
1009
55ed8ca1
JH
1010int hci_link_keys_clear(struct hci_dev *hdev)
1011{
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->link_keys) {
1015 struct link_key *key;
1016
1017 key = list_entry(p, struct link_key, list);
1018
1019 list_del(p);
1020 kfree(key);
1021 }
1022
1023 return 0;
1024}
1025
1026struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1027{
8035ded4 1028 struct link_key *k;
55ed8ca1 1029
8035ded4 1030 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1031 if (bacmp(bdaddr, &k->bdaddr) == 0)
1032 return k;
55ed8ca1
JH
1033
1034 return NULL;
1035}
1036
d25e28ab
JH
1037static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1038 u8 key_type, u8 old_key_type)
1039{
1040 /* Legacy key */
1041 if (key_type < 0x03)
1042 return 1;
1043
1044 /* Debug keys are insecure so don't store them persistently */
1045 if (key_type == HCI_LK_DEBUG_COMBINATION)
1046 return 0;
1047
1048 /* Changed combination key and there's no previous one */
1049 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1050 return 0;
1051
1052 /* Security mode 3 case */
1053 if (!conn)
1054 return 1;
1055
1056 /* Neither local nor remote side had no-bonding as requirement */
1057 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1058 return 1;
1059
1060 /* Local side had dedicated bonding as requirement */
1061 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1062 return 1;
1063
1064 /* Remote side had dedicated bonding as requirement */
1065 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1066 return 1;
1067
1068 /* If none of the above criteria match, then don't store the key
1069 * persistently */
1070 return 0;
1071}
1072
75d262c2
VCG
1073struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1074{
1075 struct link_key *k;
1076
1077 list_for_each_entry(k, &hdev->link_keys, list) {
1078 struct key_master_id *id;
1079
1080 if (k->type != HCI_LK_SMP_LTK)
1081 continue;
1082
1083 if (k->dlen != sizeof(*id))
1084 continue;
1085
1086 id = (void *) &k->data;
1087 if (id->ediv == ediv &&
1088 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1089 return k;
1090 }
1091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_ltk);
1095
1096struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1097 bdaddr_t *bdaddr, u8 type)
1098{
1099 struct link_key *k;
1100
1101 list_for_each_entry(k, &hdev->link_keys, list)
1102 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1103 return k;
1104
1105 return NULL;
1106}
1107EXPORT_SYMBOL(hci_find_link_key_type);
1108
d25e28ab
JH
1109int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1110 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1111{
1112 struct link_key *key, *old_key;
4df378a1 1113 u8 old_key_type, persistent;
55ed8ca1
JH
1114
1115 old_key = hci_find_link_key(hdev, bdaddr);
1116 if (old_key) {
1117 old_key_type = old_key->type;
1118 key = old_key;
1119 } else {
12adcf3a 1120 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1121 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1122 if (!key)
1123 return -ENOMEM;
1124 list_add(&key->list, &hdev->link_keys);
1125 }
1126
1127 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1128
d25e28ab
JH
1129 /* Some buggy controller combinations generate a changed
1130 * combination key for legacy pairing even when there's no
1131 * previous key */
1132 if (type == HCI_LK_CHANGED_COMBINATION &&
1133 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1134 old_key_type == 0xff) {
d25e28ab 1135 type = HCI_LK_COMBINATION;
655fe6ec
JH
1136 if (conn)
1137 conn->key_type = type;
1138 }
d25e28ab 1139
55ed8ca1
JH
1140 bacpy(&key->bdaddr, bdaddr);
1141 memcpy(key->val, val, 16);
55ed8ca1
JH
1142 key->pin_len = pin_len;
1143
b6020ba0 1144 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1145 key->type = old_key_type;
4748fed2
JH
1146 else
1147 key->type = type;
1148
4df378a1
JH
1149 if (!new_key)
1150 return 0;
1151
1152 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1153
1154 mgmt_new_key(hdev->id, key, persistent);
1155
1156 if (!persistent) {
1157 list_del(&key->list);
1158 kfree(key);
1159 }
55ed8ca1
JH
1160
1161 return 0;
1162}
1163
75d262c2 1164int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1165 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1166{
1167 struct link_key *key, *old_key;
1168 struct key_master_id *id;
1169 u8 old_key_type;
1170
1171 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1172
1173 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1174 if (old_key) {
1175 key = old_key;
1176 old_key_type = old_key->type;
1177 } else {
1178 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1179 if (!key)
1180 return -ENOMEM;
1181 list_add(&key->list, &hdev->link_keys);
1182 old_key_type = 0xff;
1183 }
1184
1185 key->dlen = sizeof(*id);
1186
1187 bacpy(&key->bdaddr, bdaddr);
1188 memcpy(key->val, ltk, sizeof(key->val));
1189 key->type = HCI_LK_SMP_LTK;
726b4ffc 1190 key->pin_len = key_size;
75d262c2
VCG
1191
1192 id = (void *) &key->data;
1193 id->ediv = ediv;
1194 memcpy(id->rand, rand, sizeof(id->rand));
1195
1196 if (new_key)
1197 mgmt_new_key(hdev->id, key, old_key_type);
1198
1199 return 0;
1200}
1201
55ed8ca1
JH
1202int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1203{
1204 struct link_key *key;
1205
1206 key = hci_find_link_key(hdev, bdaddr);
1207 if (!key)
1208 return -ENOENT;
1209
1210 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1211
1212 list_del(&key->list);
1213 kfree(key);
1214
1215 return 0;
1216}
1217
6bd32326
VT
/* HCI command timer function: fires when the controller has not
 * answered the last issued command within the timeout. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Force the command credit back to 1 and kick the command
	 * tasklet so queued commands are not stuck forever. */
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1227
2763eda6
SJ
1228struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1229 bdaddr_t *bdaddr)
1230{
1231 struct oob_data *data;
1232
1233 list_for_each_entry(data, &hdev->remote_oob_data, list)
1234 if (bacmp(bdaddr, &data->bdaddr) == 0)
1235 return data;
1236
1237 return NULL;
1238}
1239
1240int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1241{
1242 struct oob_data *data;
1243
1244 data = hci_find_remote_oob_data(hdev, bdaddr);
1245 if (!data)
1246 return -ENOENT;
1247
1248 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1249
1250 list_del(&data->list);
1251 kfree(data);
1252
1253 return 0;
1254}
1255
1256int hci_remote_oob_data_clear(struct hci_dev *hdev)
1257{
1258 struct oob_data *data, *n;
1259
1260 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1261 list_del(&data->list);
1262 kfree(data);
1263 }
1264
1265 return 0;
1266}
1267
/* Store (or overwrite) the remote OOB hash/randomizer pair for @bdaddr.
 * A new entry is allocated the first time an address is seen; an
 * existing entry is simply refreshed.  Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1291
b2a66aad
AJ
1292struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1293 bdaddr_t *bdaddr)
1294{
8035ded4 1295 struct bdaddr_list *b;
b2a66aad 1296
8035ded4 1297 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1298 if (bacmp(bdaddr, &b->bdaddr) == 0)
1299 return b;
b2a66aad
AJ
1300
1301 return NULL;
1302}
1303
1304int hci_blacklist_clear(struct hci_dev *hdev)
1305{
1306 struct list_head *p, *n;
1307
1308 list_for_each_safe(p, n, &hdev->blacklist) {
1309 struct bdaddr_list *b;
1310
1311 b = list_entry(p, struct bdaddr_list, list);
1312
1313 list_del(p);
1314 kfree(b);
1315 }
1316
1317 return 0;
1318}
1319
/* Add @bdaddr to the device blacklist (blocked-device list).
 * BDADDR_ANY is rejected with -EBADF, duplicates with -EEXIST; on
 * success a mgmt "device blocked" event is emitted and its result
 * returned to the caller. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev->id, bdaddr);
}
1340
1341int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1342{
1343 struct bdaddr_list *entry;
b2a66aad 1344
a7925bd2 1345 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
5e762444 1346 return hci_blacklist_clear(hdev);
a7925bd2 1347 }
b2a66aad
AJ
1348
1349 entry = hci_blacklist_lookup(hdev, bdaddr);
a7925bd2 1350 if (!entry) {
5e762444 1351 return -ENOENT;
a7925bd2 1352 }
b2a66aad
AJ
1353
1354 list_del(&entry->list);
1355 kfree(entry);
1356
5e762444 1357 return mgmt_device_unblocked(hdev->id, bdaddr);
b2a66aad
AJ
1358}
1359
35815085
AG
/* Advertising-cache expiry timer callback: flush all cached LE
 * advertising entries under the device lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1370
76c8686f
AG
1371int hci_adv_entries_clear(struct hci_dev *hdev)
1372{
1373 struct adv_entry *entry, *tmp;
1374
1375 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1376 list_del(&entry->list);
1377 kfree(entry);
1378 }
1379
1380 BT_DBG("%s adv cache cleared", hdev->name);
1381
1382 return 0;
1383}
1384
1385struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1386{
1387 struct adv_entry *entry;
1388
1389 list_for_each_entry(entry, &hdev->adv_entries, list)
1390 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1391 return entry;
1392
1393 return NULL;
1394}
1395
1396static inline int is_connectable_adv(u8 evt_type)
1397{
1398 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1399 return 1;
1400
1401 return 0;
1402}
1403
/* Cache the sender of a connectable LE advertising report.
 * Non-connectable event types are rejected with -EINVAL; an address
 * that is already cached is silently ignored.  Returns 0 or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1431
1da177e4
LT
/* Register HCI device.
 * Allocates the first free hci%d index (AMP controllers start at 1 so
 * index 0 stays usable as the AMP controller ID), initializes all
 * per-device state, creates the workqueue and sysfs entries, and kicks
 * the initial power-on work.  Returns the assigned id or a negative
 * errno; on failure the device is removed from hci_dev_list again. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The transport driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is non-fatal: continue without an rfkill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1556
/* Unregister HCI device.
 * Removes the device from the global list, closes it, frees pending
 * reassembly buffers, tears down rfkill/sysfs/timers/workqueue and
 * finally clears all per-device lists before dropping the reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt once setup/init has finished */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1602
/* Suspend HCI device: only notifies interested parties, no state is
 * torn down here. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1610
/* Resume HCI device: only notifies interested parties. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1618
76bca880
MH
/* Receive frame from HCI drivers.
 * The driver stores its hci_dev pointer in skb->dev.  Frames arriving
 * while the device is neither UP nor in INIT are dropped (-ENXIO);
 * otherwise the frame is stamped and queued for the RX tasklet. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1642
33e882a5 1643static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1644 int count, __u8 index)
33e882a5
SS
1645{
1646 int len = 0;
1647 int hlen = 0;
1648 int remain = count;
1649 struct sk_buff *skb;
1650 struct bt_skb_cb *scb;
1651
1652 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1653 index >= NUM_REASSEMBLY)
1654 return -EILSEQ;
1655
1656 skb = hdev->reassembly[index];
1657
1658 if (!skb) {
1659 switch (type) {
1660 case HCI_ACLDATA_PKT:
1661 len = HCI_MAX_FRAME_SIZE;
1662 hlen = HCI_ACL_HDR_SIZE;
1663 break;
1664 case HCI_EVENT_PKT:
1665 len = HCI_MAX_EVENT_SIZE;
1666 hlen = HCI_EVENT_HDR_SIZE;
1667 break;
1668 case HCI_SCODATA_PKT:
1669 len = HCI_MAX_SCO_SIZE;
1670 hlen = HCI_SCO_HDR_SIZE;
1671 break;
1672 }
1673
1e429f38 1674 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1675 if (!skb)
1676 return -ENOMEM;
1677
1678 scb = (void *) skb->cb;
1679 scb->expect = hlen;
1680 scb->pkt_type = type;
1681
1682 skb->dev = (void *) hdev;
1683 hdev->reassembly[index] = skb;
1684 }
1685
1686 while (count) {
1687 scb = (void *) skb->cb;
1688 len = min(scb->expect, (__u16)count);
1689
1690 memcpy(skb_put(skb, len), data, len);
1691
1692 count -= len;
1693 data += len;
1694 scb->expect -= len;
1695 remain = count;
1696
1697 switch (type) {
1698 case HCI_EVENT_PKT:
1699 if (skb->len == HCI_EVENT_HDR_SIZE) {
1700 struct hci_event_hdr *h = hci_event_hdr(skb);
1701 scb->expect = h->plen;
1702
1703 if (skb_tailroom(skb) < scb->expect) {
1704 kfree_skb(skb);
1705 hdev->reassembly[index] = NULL;
1706 return -ENOMEM;
1707 }
1708 }
1709 break;
1710
1711 case HCI_ACLDATA_PKT:
1712 if (skb->len == HCI_ACL_HDR_SIZE) {
1713 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1714 scb->expect = __le16_to_cpu(h->dlen);
1715
1716 if (skb_tailroom(skb) < scb->expect) {
1717 kfree_skb(skb);
1718 hdev->reassembly[index] = NULL;
1719 return -ENOMEM;
1720 }
1721 }
1722 break;
1723
1724 case HCI_SCODATA_PKT:
1725 if (skb->len == HCI_SCO_HDR_SIZE) {
1726 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1727 scb->expect = h->dlen;
1728
1729 if (skb_tailroom(skb) < scb->expect) {
1730 kfree_skb(skb);
1731 hdev->reassembly[index] = NULL;
1732 return -ENOMEM;
1733 }
1734 }
1735 break;
1736 }
1737
1738 if (scb->expect == 0) {
1739 /* Complete frame */
1740
1741 bt_cb(skb)->pkt_type = type;
1742 hci_recv_frame(skb);
1743
1744 hdev->reassembly[index] = NULL;
1745 return remain;
1746 }
1747 }
1748
1749 return remain;
1750}
1751
ef222013
MH
/* Feed a fragment of a typed HCI packet into the per-type reassembly
 * slot (slot index = type - 1).  Loops until all bytes are consumed
 * or hci_reassembly() reports an error. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1771
99811510
SS
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream (UART-style
 * transports): the first byte of each packet is the packet type
 * indicator.  Returns bytes left unconsumed or a negative errno
 * propagated from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1806
1da177e4
LT
1807/* ---- Interface to upper protocols ---- */
1808
1809/* Register/Unregister protocols.
1810 * hci_task_lock is used to ensure that no tasks are running. */
1811int hci_register_proto(struct hci_proto *hp)
1812{
1813 int err = 0;
1814
1815 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1816
1817 if (hp->id >= HCI_MAX_PROTO)
1818 return -EINVAL;
1819
1820 write_lock_bh(&hci_task_lock);
1821
1822 if (!hci_proto[hp->id])
1823 hci_proto[hp->id] = hp;
1824 else
1825 err = -EEXIST;
1826
1827 write_unlock_bh(&hci_task_lock);
1828
1829 return err;
1830}
1831EXPORT_SYMBOL(hci_register_proto);
1832
1833int hci_unregister_proto(struct hci_proto *hp)
1834{
1835 int err = 0;
1836
1837 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1838
1839 if (hp->id >= HCI_MAX_PROTO)
1840 return -EINVAL;
1841
1842 write_lock_bh(&hci_task_lock);
1843
1844 if (hci_proto[hp->id])
1845 hci_proto[hp->id] = NULL;
1846 else
1847 err = -ENOENT;
1848
1849 write_unlock_bh(&hci_task_lock);
1850
1851 return err;
1852}
1853EXPORT_SYMBOL(hci_unregister_proto);
1854
/* Register an upper-layer callback structure for HCI events. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1866
/* Unregister a previously registered callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1878
/* Hand one frame to the transport driver; when any socket is in
 * promiscuous mode the frame is mirrored to monitoring sockets first. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1902
/* Send HCI command: build a command packet for @opcode with @plen
 * parameter bytes from @param, queue it on cmd_q and kick the command
 * tasklet.  Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init so the
	 * init sequence can match replies to it. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1da177e4
LT
1938
/* Get data from the previously sent command.
 * Returns a pointer to the parameter area of the last sent command,
 * or NULL if nothing was sent or the opcode does not match. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1956
/* Send ACL data */

/* Prepend the 4-byte ACL header (packed handle+flags, data length)
 * in front of the skb payload. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1969
73d80deb
LAD
/* Queue an ACL frame (plus any fragments hanging off its frag_list)
 * on @queue.  Fragments after the first are re-flagged ACL_CONT and
 * queued under the queue lock so the scheduler never interleaves
 * other traffic between them. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2010
/* Stamp the ACL header on @skb, queue it on the channel's data queue
 * and kick the TX tasklet. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2027
/* Send SCO data: prepend the SCO header, queue the frame on the
 * connection and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2050
2051/* ---- HCI TX task (outgoing data) ---- */
2052
/* HCI Connection scheduler: pick the connection of @type with pending
 * data and the fewest packets in flight, and compute its fair share
 * (*quote) of the available controller buffer credits. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type inspected: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2107
bae1f5d9 2108static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2109{
2110 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2111 struct hci_conn *c;
1da177e4 2112
bae1f5d9 2113 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
2114
2115 /* Kill stalled connections */
8035ded4 2116 list_for_each_entry(c, &h->list, list) {
bae1f5d9
VT
2117 if (c->type == type && c->sent) {
2118 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2119 hdev->name, batostr(&c->dst));
2120 hci_acl_disconn(c, 0x13);
2121 }
2122 }
2123}
2124
73d80deb
LAD
/* Channel scheduler: among connections of @type, select the channel
 * whose head skb has the highest priority (ties broken by the fewest
 * packets in flight on its connection) and compute its fair-share
 * quota of the matching controller buffer credits. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the
				 * fairness bookkeeping at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2201}
2202
02b20f0b
LAD
/* Anti-starvation pass: for every channel of @type that sent nothing
 * in the last scheduling round, promote its head skb to priority
 * (HCI_PRIO_MAX - 1); channels that did send get their per-round
 * counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2249
73d80deb
LAD
/* Schedule ACL traffic: drain channels in priority order while ACL
 * buffer credits last, detecting stalled links when credits have been
 * exhausted longer than the link supervision timeout. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2299
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2323
b6a0dc82
MH
/* Schedule eSCO traffic (shares the SCO credit pool). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2346
6ed58ec5
VT
/* Schedule LE traffic.  Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) borrow ACL credits; the consumed count is
 * written back to the pool it came from at the end. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2397
1da177e4
LT
2398static void hci_tx_task(unsigned long arg)
2399{
2400 struct hci_dev *hdev = (struct hci_dev *) arg;
2401 struct sk_buff *skb;
2402
2403 read_lock(&hci_task_lock);
2404
6ed58ec5
VT
2405 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2406 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2407
2408 /* Schedule queues and send stuff to HCI driver */
2409
2410 hci_sched_acl(hdev);
2411
2412 hci_sched_sco(hdev);
2413
b6a0dc82
MH
2414 hci_sched_esco(hdev);
2415
6ed58ec5
VT
2416 hci_sched_le(hdev);
2417
1da177e4
LT
2418 /* Send next queued raw (unknown type) packet */
2419 while ((skb = skb_dequeue(&hdev->raw_q)))
2420 hci_send_frame(skb);
2421
2422 read_unlock(&hci_task_lock);
2423}
2424
25985edc 2425/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2426
2427/* ACL data packet */
2428static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2429{
2430 struct hci_acl_hdr *hdr = (void *) skb->data;
2431 struct hci_conn *conn;
2432 __u16 handle, flags;
2433
2434 skb_pull(skb, HCI_ACL_HDR_SIZE);
2435
2436 handle = __le16_to_cpu(hdr->handle);
2437 flags = hci_flags(handle);
2438 handle = hci_handle(handle);
2439
2440 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2441
2442 hdev->stat.acl_rx++;
2443
2444 hci_dev_lock(hdev);
2445 conn = hci_conn_hash_lookup_handle(hdev, handle);
2446 hci_dev_unlock(hdev);
8e87d142 2447
1da177e4
LT
2448 if (conn) {
2449 register struct hci_proto *hp;
2450
14b12d0b 2451 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
04837f64 2452
1da177e4 2453 /* Send to upper protocol */
70f23020
AE
2454 hp = hci_proto[HCI_PROTO_L2CAP];
2455 if (hp && hp->recv_acldata) {
1da177e4
LT
2456 hp->recv_acldata(conn, skb, flags);
2457 return;
2458 }
2459 } else {
8e87d142 2460 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2461 hdev->name, handle);
2462 }
2463
2464 kfree_skb(skb);
2465}
2466
2467/* SCO data packet */
2468static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2469{
2470 struct hci_sco_hdr *hdr = (void *) skb->data;
2471 struct hci_conn *conn;
2472 __u16 handle;
2473
2474 skb_pull(skb, HCI_SCO_HDR_SIZE);
2475
2476 handle = __le16_to_cpu(hdr->handle);
2477
2478 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2479
2480 hdev->stat.sco_rx++;
2481
2482 hci_dev_lock(hdev);
2483 conn = hci_conn_hash_lookup_handle(hdev, handle);
2484 hci_dev_unlock(hdev);
2485
2486 if (conn) {
2487 register struct hci_proto *hp;
2488
2489 /* Send to upper protocol */
70f23020
AE
2490 hp = hci_proto[HCI_PROTO_SCO];
2491 if (hp && hp->recv_scodata) {
1da177e4
LT
2492 hp->recv_scodata(conn, skb);
2493 return;
2494 }
2495 } else {
8e87d142 2496 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2497 hdev->name, handle);
2498 }
2499
2500 kfree_skb(skb);
2501}
2502
/* RX tasklet: drain the device's receive queue and dispatch each
 * packet by type.
 *
 * Per packet, in order: mirror to monitoring sockets when the device
 * is in promiscuous mode; drop everything in raw mode (userspace owns
 * the device); drop data packets while the device is still
 * initializing (HCI_INIT); finally hand events/ACL/SCO to their
 * handlers, which take ownership of the skb.  Runs under the read
 * side of hci_task_lock.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace processes everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2557
/* Command tasklet: send the next queued HCI command, if the
 * controller has a command credit available.
 *
 * A clone of the command is kept in hdev->sent_cmd so the completion
 * handler can inspect it.  On clone failure the command is requeued
 * and the tasklet rescheduled.  A command timer is armed to detect a
 * controller that never responds — except for HCI_Reset, whose reply
 * may legitimately take longer (the timer is cancelled instead).
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one command credit; the event handler
			 * gives it back when the controller replies. */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2519a1fc
AG
2588
2589int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2590{
2591 /* General inquiry access code (GIAC) */
2592 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2593 struct hci_cp_inquiry cp;
2594
2595 BT_DBG("%s", hdev->name);
2596
2597 if (test_bit(HCI_INQUIRY, &hdev->flags))
2598 return -EINPROGRESS;
2599
2600 memset(&cp, 0, sizeof(cp));
2601 memcpy(&cp.lap, lap, sizeof(cp.lap));
2602 cp.length = length;
2603
2604 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2605}
023d5049
AG
2606
2607int hci_cancel_inquiry(struct hci_dev *hdev)
2608{
2609 BT_DBG("%s", hdev->name);
2610
2611 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2612 return -EPERM;
2613
2614 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2615}