Bluetooth: Simplify __l2cap_global_chan_by_addr
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
1da177e4
LT
/* Forward declarations for the three HCI tasklets. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
77
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
e041c683 82 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
83}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
e041c683 87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
88}
89
6516455d 90static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 91{
e041c683 92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
93}
94
95/* ---- HCI requests ---- */
96
23bb5763 97void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 98{
23bb5763
JH
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
a5040efa
JH
101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 105 return;
1da177e4
LT
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
8e87d142 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 127 unsigned long opt, __u32 timeout)
1da177e4
LT
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
e175072f 149 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
3ff50b79 159 }
1da177e4 160
a5040efa 161 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 169 unsigned long opt, __u32 timeout)
1da177e4
LT
170{
171 int ret;
172
7c6a329e
MH
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
1da177e4
LT
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
f630cf0d 189 set_bit(HCI_RESET, &hdev->flags);
a9de9248 190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
191}
192
193static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194{
b0916ea0 195 struct hci_cp_delete_stored_link_key cp;
1da177e4 196 struct sk_buff *skb;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4
LT
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 207 skb->dev = (void *) hdev;
c78ae283 208
1da177e4 209 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 210 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
f630cf0d
GP
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 220 }
1da177e4
LT
221
222 /* Read Local Supported Features */
a9de9248 223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 224
1143e5a6 225 /* Read Local Version */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 227
1da177e4 228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4
LT
230
231#if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
aca3192c 235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
1da177e4 236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
aca3192c
YH
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
1da177e4
LT
240 }
241#endif
242
243 /* Read BD Address */
a9de9248
MH
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
251
252 /* Read Voice Setting */
a9de9248 253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
89f2783d 258 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 260
1da177e4 261 /* Connection accept timeout ~20 secs */
aca3192c 262 param = cpu_to_le16(0x7d00);
a9de9248 263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
268}
269
6ed58ec5
VT
270static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271{
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276}
277
1da177e4
LT
278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
a9de9248 285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
a9de9248 295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
e4e8e37c 304 /* Encryption */
a9de9248 305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
306}
307
e4e8e37c
MH
308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
a418b893 312 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
8e87d142 318/* Get HCI device by index.
1da177e4
LT
319 * Device is held on return. */
320struct hci_dev *hci_dev_get(int index)
321{
8035ded4 322 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
8035ded4 330 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338}
1da177e4
LT
339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
368void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 371 struct inquiry_entry *ie;
1da177e4
LT
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
70f23020
AE
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
1da177e4 377 /* Entry not in the cache. Add new one. */
70f23020
AE
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
1da177e4 380 return;
70f23020
AE
381
382 ie->next = cache->list;
383 cache->list = ie;
1da177e4
LT
384 }
385
70f23020
AE
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
1da177e4
LT
388 cache->timestamp = jiffies;
389}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
413static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414{
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
a9de9248 427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
428}
429
430int hci_inquiry(void __user *arg)
431{
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
5a08ecce
AE
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
1da177e4
LT
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
8e87d142 447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
04837f64 455 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
1da177e4
LT
462
463 /* for unlimited number of responses we will use buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467 * copy it to the user space.
468 */
01df8c31 469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 470 if (!buf) {
1da177e4
LT
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
8e87d142 486 } else
1da177e4
LT
487 err = -EFAULT;
488
489 kfree(buf);
490
491done:
492 hci_dev_put(hdev);
493 return err;
494}
495
496/* ---- HCI ioctl helpers ---- */
497
498int hci_dev_open(__u16 dev)
499{
500 struct hci_dev *hdev;
501 int ret = 0;
502
5a08ecce
AE
503 hdev = hci_dev_get(dev);
504 if (!hdev)
1da177e4
LT
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
611b30f7
MH
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
1da177e4
LT
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
07e3b94a
AE
524 /* Treat all non BR/EDR controllers as raw devices if
525 enable_hs is not set */
526 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
527 set_bit(HCI_RAW, &hdev->flags);
528
1da177e4
LT
529 if (hdev->open(hdev)) {
530 ret = -EIO;
531 goto done;
532 }
533
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
a5040efa 537 hdev->init_last_cmd = 0;
1da177e4 538
04837f64
MH
539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 541
eead27da 542 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
1da177e4
LT
546 clear_bit(HCI_INIT, &hdev->flags);
547 }
548
549 if (!ret) {
550 hci_dev_hold(hdev);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
56e5cb86
JH
553 if (!test_bit(HCI_SETUP, &hdev->flags)) {
554 hci_dev_lock_bh(hdev);
744cf19e 555 mgmt_powered(hdev, 1);
56e5cb86
JH
556 hci_dev_unlock_bh(hdev);
557 }
8e87d142 558 } else {
1da177e4
LT
559 /* Init failed, cleanup */
560 tasklet_kill(&hdev->rx_task);
561 tasklet_kill(&hdev->tx_task);
562 tasklet_kill(&hdev->cmd_task);
563
564 skb_queue_purge(&hdev->cmd_q);
565 skb_queue_purge(&hdev->rx_q);
566
567 if (hdev->flush)
568 hdev->flush(hdev);
569
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
573 }
574
575 hdev->close(hdev);
576 hdev->flags = 0;
577 }
578
579done:
580 hci_req_unlock(hdev);
581 hci_dev_put(hdev);
582 return ret;
583}
584
585static int hci_dev_do_close(struct hci_dev *hdev)
586{
587 BT_DBG("%s %p", hdev->name, hdev);
588
589 hci_req_cancel(hdev, ENODEV);
590 hci_req_lock(hdev);
591
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 593 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
16ab91ab 602 if (hdev->discov_timeout > 0) {
e0f9309f 603 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
604 hdev->discov_timeout = 0;
605 }
606
3243553f 607 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 608 cancel_delayed_work(&hdev->power_off);
3243553f 609
1da177e4
LT
610 hci_dev_lock_bh(hdev);
611 inquiry_cache_flush(hdev);
612 hci_conn_hash_flush(hdev);
613 hci_dev_unlock_bh(hdev);
614
615 hci_notify(hdev, HCI_DEV_DOWN);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 /* Reset device */
621 skb_queue_purge(&hdev->cmd_q);
622 atomic_set(&hdev->cmd_cnt, 1);
623 if (!test_bit(HCI_RAW, &hdev->flags)) {
624 set_bit(HCI_INIT, &hdev->flags);
04837f64 625 __hci_request(hdev, hci_reset_req, 0,
43611a7b 626 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
627 clear_bit(HCI_INIT, &hdev->flags);
628 }
629
630 /* Kill cmd task */
631 tasklet_kill(&hdev->cmd_task);
632
633 /* Drop queues */
634 skb_queue_purge(&hdev->rx_q);
635 skb_queue_purge(&hdev->cmd_q);
636 skb_queue_purge(&hdev->raw_q);
637
638 /* Drop last sent command */
639 if (hdev->sent_cmd) {
b79f44c1 640 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
641 kfree_skb(hdev->sent_cmd);
642 hdev->sent_cmd = NULL;
643 }
644
645 /* After this point our queues are empty
646 * and no tasks are scheduled. */
647 hdev->close(hdev);
648
56e5cb86 649 hci_dev_lock_bh(hdev);
744cf19e 650 mgmt_powered(hdev, 0);
56e5cb86 651 hci_dev_unlock_bh(hdev);
5add6af8 652
1da177e4
LT
653 /* Clear flags */
654 hdev->flags = 0;
655
656 hci_req_unlock(hdev);
657
658 hci_dev_put(hdev);
659 return 0;
660}
661
662int hci_dev_close(__u16 dev)
663{
664 struct hci_dev *hdev;
665 int err;
666
70f23020
AE
667 hdev = hci_dev_get(dev);
668 if (!hdev)
1da177e4
LT
669 return -ENODEV;
670 err = hci_dev_do_close(hdev);
671 hci_dev_put(hdev);
672 return err;
673}
674
675int hci_dev_reset(__u16 dev)
676{
677 struct hci_dev *hdev;
678 int ret = 0;
679
70f23020
AE
680 hdev = hci_dev_get(dev);
681 if (!hdev)
1da177e4
LT
682 return -ENODEV;
683
684 hci_req_lock(hdev);
685 tasklet_disable(&hdev->tx_task);
686
687 if (!test_bit(HCI_UP, &hdev->flags))
688 goto done;
689
690 /* Drop queues */
691 skb_queue_purge(&hdev->rx_q);
692 skb_queue_purge(&hdev->cmd_q);
693
694 hci_dev_lock_bh(hdev);
695 inquiry_cache_flush(hdev);
696 hci_conn_hash_flush(hdev);
697 hci_dev_unlock_bh(hdev);
698
699 if (hdev->flush)
700 hdev->flush(hdev);
701
8e87d142 702 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 703 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
704
705 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
706 ret = __hci_request(hdev, hci_reset_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
708
709done:
710 tasklet_enable(&hdev->tx_task);
711 hci_req_unlock(hdev);
712 hci_dev_put(hdev);
713 return ret;
714}
715
716int hci_dev_reset_stat(__u16 dev)
717{
718 struct hci_dev *hdev;
719 int ret = 0;
720
70f23020
AE
721 hdev = hci_dev_get(dev);
722 if (!hdev)
1da177e4
LT
723 return -ENODEV;
724
725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
726
727 hci_dev_put(hdev);
728
729 return ret;
730}
731
732int hci_dev_cmd(unsigned int cmd, void __user *arg)
733{
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
738 if (copy_from_user(&dr, arg, sizeof(dr)))
739 return -EFAULT;
740
70f23020
AE
741 hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
1da177e4
LT
743 return -ENODEV;
744
745 switch (cmd) {
746 case HCISETAUTH:
04837f64
MH
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
749 break;
750
751 case HCISETENCRYPT:
752 if (!lmp_encrypt_capable(hdev)) {
753 err = -EOPNOTSUPP;
754 break;
755 }
756
757 if (!test_bit(HCI_AUTH, &hdev->flags)) {
758 /* Auth must be enabled first */
04837f64
MH
759 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
761 if (err)
762 break;
763 }
764
04837f64
MH
765 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
767 break;
768
769 case HCISETSCAN:
04837f64
MH
770 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
771 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
772 break;
773
1da177e4 774 case HCISETLINKPOL:
e4e8e37c
MH
775 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
777 break;
778
779 case HCISETLINKMODE:
e4e8e37c
MH
780 hdev->link_mode = ((__u16) dr.dev_opt) &
781 (HCI_LM_MASTER | HCI_LM_ACCEPT);
782 break;
783
784 case HCISETPTYPE:
785 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
786 break;
787
788 case HCISETACLMTU:
e4e8e37c
MH
789 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
790 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
791 break;
792
793 case HCISETSCOMTU:
e4e8e37c
MH
794 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
795 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
796 break;
797
798 default:
799 err = -EINVAL;
800 break;
801 }
e4e8e37c 802
1da177e4
LT
803 hci_dev_put(hdev);
804 return err;
805}
806
807int hci_get_dev_list(void __user *arg)
808{
8035ded4 809 struct hci_dev *hdev;
1da177e4
LT
810 struct hci_dev_list_req *dl;
811 struct hci_dev_req *dr;
1da177e4
LT
812 int n = 0, size, err;
813 __u16 dev_num;
814
815 if (get_user(dev_num, (__u16 __user *) arg))
816 return -EFAULT;
817
818 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
819 return -EINVAL;
820
821 size = sizeof(*dl) + dev_num * sizeof(*dr);
822
70f23020
AE
823 dl = kzalloc(size, GFP_KERNEL);
824 if (!dl)
1da177e4
LT
825 return -ENOMEM;
826
827 dr = dl->dev_req;
828
829 read_lock_bh(&hci_dev_list_lock);
8035ded4 830 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 832 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
833
834 if (!test_bit(HCI_MGMT, &hdev->flags))
835 set_bit(HCI_PAIRABLE, &hdev->flags);
836
1da177e4
LT
837 (dr + n)->dev_id = hdev->id;
838 (dr + n)->dev_opt = hdev->flags;
c542a06c 839
1da177e4
LT
840 if (++n >= dev_num)
841 break;
842 }
843 read_unlock_bh(&hci_dev_list_lock);
844
845 dl->dev_num = n;
846 size = sizeof(*dl) + n * sizeof(*dr);
847
848 err = copy_to_user(arg, dl, size);
849 kfree(dl);
850
851 return err ? -EFAULT : 0;
852}
853
854int hci_get_dev_info(void __user *arg)
855{
856 struct hci_dev *hdev;
857 struct hci_dev_info di;
858 int err = 0;
859
860 if (copy_from_user(&di, arg, sizeof(di)))
861 return -EFAULT;
862
70f23020
AE
863 hdev = hci_dev_get(di.dev_id);
864 if (!hdev)
1da177e4
LT
865 return -ENODEV;
866
3243553f
JH
867 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
868 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 869
c542a06c
JH
870 if (!test_bit(HCI_MGMT, &hdev->flags))
871 set_bit(HCI_PAIRABLE, &hdev->flags);
872
1da177e4
LT
873 strcpy(di.name, hdev->name);
874 di.bdaddr = hdev->bdaddr;
943da25d 875 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
876 di.flags = hdev->flags;
877 di.pkt_type = hdev->pkt_type;
878 di.acl_mtu = hdev->acl_mtu;
879 di.acl_pkts = hdev->acl_pkts;
880 di.sco_mtu = hdev->sco_mtu;
881 di.sco_pkts = hdev->sco_pkts;
882 di.link_policy = hdev->link_policy;
883 di.link_mode = hdev->link_mode;
884
885 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
886 memcpy(&di.features, &hdev->features, sizeof(di.features));
887
888 if (copy_to_user(arg, &di, sizeof(di)))
889 err = -EFAULT;
890
891 hci_dev_put(hdev);
892
893 return err;
894}
895
896/* ---- Interface to HCI drivers ---- */
897
611b30f7
MH
898static int hci_rfkill_set_block(void *data, bool blocked)
899{
900 struct hci_dev *hdev = data;
901
902 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
903
904 if (!blocked)
905 return 0;
906
907 hci_dev_do_close(hdev);
908
909 return 0;
910}
911
912static const struct rfkill_ops hci_rfkill_ops = {
913 .set_block = hci_rfkill_set_block,
914};
915
1da177e4
LT
916/* Alloc HCI device */
917struct hci_dev *hci_alloc_dev(void)
918{
919 struct hci_dev *hdev;
920
25ea6db0 921 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
922 if (!hdev)
923 return NULL;
924
0ac7e700 925 hci_init_sysfs(hdev);
1da177e4
LT
926 skb_queue_head_init(&hdev->driver_init);
927
928 return hdev;
929}
930EXPORT_SYMBOL(hci_alloc_dev);
931
932/* Free HCI device */
933void hci_free_dev(struct hci_dev *hdev)
934{
935 skb_queue_purge(&hdev->driver_init);
936
a91f2e39
MH
937 /* will free via device release */
938 put_device(&hdev->dev);
1da177e4
LT
939}
940EXPORT_SYMBOL(hci_free_dev);
941
ab81cbf9
JH
942static void hci_power_on(struct work_struct *work)
943{
944 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
945
946 BT_DBG("%s", hdev->name);
947
948 if (hci_dev_open(hdev->id) < 0)
949 return;
950
951 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
3243553f
JH
952 queue_delayed_work(hdev->workqueue, &hdev->power_off,
953 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
954
955 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 956 mgmt_index_added(hdev);
ab81cbf9
JH
957}
958
959static void hci_power_off(struct work_struct *work)
960{
3243553f
JH
961 struct hci_dev *hdev = container_of(work, struct hci_dev,
962 power_off.work);
ab81cbf9
JH
963
964 BT_DBG("%s", hdev->name);
965
966 clear_bit(HCI_AUTO_OFF, &hdev->flags);
967
3243553f 968 hci_dev_close(hdev->id);
ab81cbf9
JH
969}
970
16ab91ab
JH
971static void hci_discov_off(struct work_struct *work)
972{
973 struct hci_dev *hdev;
974 u8 scan = SCAN_PAGE;
975
976 hdev = container_of(work, struct hci_dev, discov_off.work);
977
978 BT_DBG("%s", hdev->name);
979
980 hci_dev_lock_bh(hdev);
981
982 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
983
984 hdev->discov_timeout = 0;
985
986 hci_dev_unlock_bh(hdev);
987}
988
2aeb9a1a
JH
989int hci_uuids_clear(struct hci_dev *hdev)
990{
991 struct list_head *p, *n;
992
993 list_for_each_safe(p, n, &hdev->uuids) {
994 struct bt_uuid *uuid;
995
996 uuid = list_entry(p, struct bt_uuid, list);
997
998 list_del(p);
999 kfree(uuid);
1000 }
1001
1002 return 0;
1003}
1004
55ed8ca1
JH
1005int hci_link_keys_clear(struct hci_dev *hdev)
1006{
1007 struct list_head *p, *n;
1008
1009 list_for_each_safe(p, n, &hdev->link_keys) {
1010 struct link_key *key;
1011
1012 key = list_entry(p, struct link_key, list);
1013
1014 list_del(p);
1015 kfree(key);
1016 }
1017
1018 return 0;
1019}
1020
1021struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1022{
8035ded4 1023 struct link_key *k;
55ed8ca1 1024
8035ded4 1025 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1026 if (bacmp(bdaddr, &k->bdaddr) == 0)
1027 return k;
55ed8ca1
JH
1028
1029 return NULL;
1030}
1031
d25e28ab
JH
1032static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1033 u8 key_type, u8 old_key_type)
1034{
1035 /* Legacy key */
1036 if (key_type < 0x03)
1037 return 1;
1038
1039 /* Debug keys are insecure so don't store them persistently */
1040 if (key_type == HCI_LK_DEBUG_COMBINATION)
1041 return 0;
1042
1043 /* Changed combination key and there's no previous one */
1044 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1045 return 0;
1046
1047 /* Security mode 3 case */
1048 if (!conn)
1049 return 1;
1050
1051 /* Neither local nor remote side had no-bonding as requirement */
1052 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1053 return 1;
1054
1055 /* Local side had dedicated bonding as requirement */
1056 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1057 return 1;
1058
1059 /* Remote side had dedicated bonding as requirement */
1060 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1061 return 1;
1062
1063 /* If none of the above criteria match, then don't store the key
1064 * persistently */
1065 return 0;
1066}
1067
75d262c2
VCG
1068struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1069{
1070 struct link_key *k;
1071
1072 list_for_each_entry(k, &hdev->link_keys, list) {
1073 struct key_master_id *id;
1074
1075 if (k->type != HCI_LK_SMP_LTK)
1076 continue;
1077
1078 if (k->dlen != sizeof(*id))
1079 continue;
1080
1081 id = (void *) &k->data;
1082 if (id->ediv == ediv &&
1083 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1084 return k;
1085 }
1086
1087 return NULL;
1088}
1089EXPORT_SYMBOL(hci_find_ltk);
1090
1091struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1092 bdaddr_t *bdaddr, u8 type)
1093{
1094 struct link_key *k;
1095
1096 list_for_each_entry(k, &hdev->link_keys, list)
1097 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1098 return k;
1099
1100 return NULL;
1101}
1102EXPORT_SYMBOL(hci_find_link_key_type);
1103
d25e28ab
JH
1104int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1105 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1106{
1107 struct link_key *key, *old_key;
4df378a1 1108 u8 old_key_type, persistent;
55ed8ca1
JH
1109
1110 old_key = hci_find_link_key(hdev, bdaddr);
1111 if (old_key) {
1112 old_key_type = old_key->type;
1113 key = old_key;
1114 } else {
12adcf3a 1115 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1116 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1117 if (!key)
1118 return -ENOMEM;
1119 list_add(&key->list, &hdev->link_keys);
1120 }
1121
1122 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1123
d25e28ab
JH
1124 /* Some buggy controller combinations generate a changed
1125 * combination key for legacy pairing even when there's no
1126 * previous key */
1127 if (type == HCI_LK_CHANGED_COMBINATION &&
1128 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1129 old_key_type == 0xff) {
d25e28ab 1130 type = HCI_LK_COMBINATION;
655fe6ec
JH
1131 if (conn)
1132 conn->key_type = type;
1133 }
d25e28ab 1134
55ed8ca1
JH
1135 bacpy(&key->bdaddr, bdaddr);
1136 memcpy(key->val, val, 16);
55ed8ca1
JH
1137 key->pin_len = pin_len;
1138
b6020ba0 1139 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1140 key->type = old_key_type;
4748fed2
JH
1141 else
1142 key->type = type;
1143
4df378a1
JH
1144 if (!new_key)
1145 return 0;
1146
1147 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1148
744cf19e 1149 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1150
1151 if (!persistent) {
1152 list_del(&key->list);
1153 kfree(key);
1154 }
55ed8ca1
JH
1155
1156 return 0;
1157}
1158
75d262c2 1159int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1160 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1161{
1162 struct link_key *key, *old_key;
1163 struct key_master_id *id;
1164 u8 old_key_type;
1165
1166 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1167
1168 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1169 if (old_key) {
1170 key = old_key;
1171 old_key_type = old_key->type;
1172 } else {
1173 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1174 if (!key)
1175 return -ENOMEM;
1176 list_add(&key->list, &hdev->link_keys);
1177 old_key_type = 0xff;
1178 }
1179
1180 key->dlen = sizeof(*id);
1181
1182 bacpy(&key->bdaddr, bdaddr);
1183 memcpy(key->val, ltk, sizeof(key->val));
1184 key->type = HCI_LK_SMP_LTK;
726b4ffc 1185 key->pin_len = key_size;
75d262c2
VCG
1186
1187 id = (void *) &key->data;
1188 id->ediv = ediv;
1189 memcpy(id->rand, rand, sizeof(id->rand));
1190
1191 if (new_key)
744cf19e 1192 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1193
1194 return 0;
1195}
1196
55ed8ca1
JH
1197int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1198{
1199 struct link_key *key;
1200
1201 key = hci_find_link_key(hdev, bdaddr);
1202 if (!key)
1203 return -ENOENT;
1204
1205 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1206
1207 list_del(&key->list);
1208 kfree(key);
1209
1210 return 0;
1211}
1212
6bd32326
VT
1213/* HCI command timer function */
1214static void hci_cmd_timer(unsigned long arg)
1215{
1216 struct hci_dev *hdev = (void *) arg;
1217
1218 BT_ERR("%s command tx timeout", hdev->name);
1219 atomic_set(&hdev->cmd_cnt, 1);
1220 tasklet_schedule(&hdev->cmd_task);
1221}
1222
2763eda6
SJ
1223struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1224 bdaddr_t *bdaddr)
1225{
1226 struct oob_data *data;
1227
1228 list_for_each_entry(data, &hdev->remote_oob_data, list)
1229 if (bacmp(bdaddr, &data->bdaddr) == 0)
1230 return data;
1231
1232 return NULL;
1233}
1234
1235int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1236{
1237 struct oob_data *data;
1238
1239 data = hci_find_remote_oob_data(hdev, bdaddr);
1240 if (!data)
1241 return -ENOENT;
1242
1243 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1244
1245 list_del(&data->list);
1246 kfree(data);
1247
1248 return 0;
1249}
1250
1251int hci_remote_oob_data_clear(struct hci_dev *hdev)
1252{
1253 struct oob_data *data, *n;
1254
1255 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1256 list_del(&data->list);
1257 kfree(data);
1258 }
1259
1260 return 0;
1261}
1262
1263int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1264 u8 *randomizer)
1265{
1266 struct oob_data *data;
1267
1268 data = hci_find_remote_oob_data(hdev, bdaddr);
1269
1270 if (!data) {
1271 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1272 if (!data)
1273 return -ENOMEM;
1274
1275 bacpy(&data->bdaddr, bdaddr);
1276 list_add(&data->list, &hdev->remote_oob_data);
1277 }
1278
1279 memcpy(data->hash, hash, sizeof(data->hash));
1280 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1281
1282 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1283
1284 return 0;
1285}
1286
b2a66aad
AJ
1287struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1288 bdaddr_t *bdaddr)
1289{
8035ded4 1290 struct bdaddr_list *b;
b2a66aad 1291
8035ded4 1292 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1293 if (bacmp(bdaddr, &b->bdaddr) == 0)
1294 return b;
b2a66aad
AJ
1295
1296 return NULL;
1297}
1298
1299int hci_blacklist_clear(struct hci_dev *hdev)
1300{
1301 struct list_head *p, *n;
1302
1303 list_for_each_safe(p, n, &hdev->blacklist) {
1304 struct bdaddr_list *b;
1305
1306 b = list_entry(p, struct bdaddr_list, list);
1307
1308 list_del(p);
1309 kfree(b);
1310 }
1311
1312 return 0;
1313}
1314
1315int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1316{
1317 struct bdaddr_list *entry;
b2a66aad
AJ
1318
1319 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1320 return -EBADF;
1321
5e762444
AJ
1322 if (hci_blacklist_lookup(hdev, bdaddr))
1323 return -EEXIST;
b2a66aad
AJ
1324
1325 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1326 if (!entry)
1327 return -ENOMEM;
b2a66aad
AJ
1328
1329 bacpy(&entry->bdaddr, bdaddr);
1330
1331 list_add(&entry->list, &hdev->blacklist);
1332
744cf19e 1333 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1334}
1335
1336int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1337{
1338 struct bdaddr_list *entry;
b2a66aad 1339
a7925bd2 1340 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
5e762444 1341 return hci_blacklist_clear(hdev);
a7925bd2 1342 }
b2a66aad
AJ
1343
1344 entry = hci_blacklist_lookup(hdev, bdaddr);
a7925bd2 1345 if (!entry) {
5e762444 1346 return -ENOENT;
a7925bd2 1347 }
b2a66aad
AJ
1348
1349 list_del(&entry->list);
1350 kfree(entry);
1351
744cf19e 1352 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1353}
1354
/* Timer callback: expire the LE advertising cache under the dev lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1365
76c8686f
AG
1366int hci_adv_entries_clear(struct hci_dev *hdev)
1367{
1368 struct adv_entry *entry, *tmp;
1369
1370 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1371 list_del(&entry->list);
1372 kfree(entry);
1373 }
1374
1375 BT_DBG("%s adv cache cleared", hdev->name);
1376
1377 return 0;
1378}
1379
1380struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct adv_entry *entry;
1383
1384 list_for_each_entry(entry, &hdev->adv_entries, list)
1385 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1386 return entry;
1387
1388 return NULL;
1389}
1390
1391static inline int is_connectable_adv(u8 evt_type)
1392{
1393 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1394 return 1;
1395
1396 return 0;
1397}
1398
1399int hci_add_adv_entry(struct hci_dev *hdev,
1400 struct hci_ev_le_advertising_info *ev)
1401{
1402 struct adv_entry *entry;
1403
1404 if (!is_connectable_adv(ev->evt_type))
1405 return -EINVAL;
1406
1407 /* Only new entries should be added to adv_entries. So, if
1408 * bdaddr was found, don't add it. */
1409 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1410 return 0;
1411
1412 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1413 if (!entry)
1414 return -ENOMEM;
1415
1416 bacpy(&entry->bdaddr, &ev->bdaddr);
1417 entry->bdaddr_type = ev->bdaddr_type;
1418
1419 list_add(&entry->list, &hdev->adv_entries);
1420
1421 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1422 batostr(&entry->bdaddr), entry->bdaddr_type);
1423
1424 return 0;
1425}
1426
1da177e4
LT
1427/* Register HCI device */
1428int hci_register_dev(struct hci_dev *hdev)
1429{
1430 struct list_head *head = &hci_dev_list, *p;
08add513 1431 int i, id, error;
1da177e4 1432
c13854ce
MH
1433 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1434 hdev->bus, hdev->owner);
1da177e4
LT
1435
1436 if (!hdev->open || !hdev->close || !hdev->destruct)
1437 return -EINVAL;
1438
08add513
MM
1439 /* Do not allow HCI_AMP devices to register at index 0,
1440 * so the index can be used as the AMP controller ID.
1441 */
1442 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1443
1da177e4
LT
1444 write_lock_bh(&hci_dev_list_lock);
1445
1446 /* Find first available device id */
1447 list_for_each(p, &hci_dev_list) {
1448 if (list_entry(p, struct hci_dev, list)->id != id)
1449 break;
1450 head = p; id++;
1451 }
8e87d142 1452
1da177e4
LT
1453 sprintf(hdev->name, "hci%d", id);
1454 hdev->id = id;
1455 list_add(&hdev->list, head);
1456
1457 atomic_set(&hdev->refcnt, 1);
1458 spin_lock_init(&hdev->lock);
1459
1460 hdev->flags = 0;
1461 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1462 hdev->esco_type = (ESCO_HV1);
1da177e4 1463 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1464 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1465
04837f64
MH
1466 hdev->idle_timeout = 0;
1467 hdev->sniff_max_interval = 800;
1468 hdev->sniff_min_interval = 80;
1469
70f23020 1470 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1da177e4
LT
1471 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1472 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1473
1474 skb_queue_head_init(&hdev->rx_q);
1475 skb_queue_head_init(&hdev->cmd_q);
1476 skb_queue_head_init(&hdev->raw_q);
1477
6bd32326
VT
1478 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1479
cd4c5391 1480 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1481 hdev->reassembly[i] = NULL;
1482
1da177e4 1483 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1484 mutex_init(&hdev->req_lock);
1da177e4
LT
1485
1486 inquiry_cache_init(hdev);
1487
1488 hci_conn_hash_init(hdev);
1489
2e58ef3e
JH
1490 INIT_LIST_HEAD(&hdev->mgmt_pending);
1491
ea4bd8ba 1492 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1493
2aeb9a1a
JH
1494 INIT_LIST_HEAD(&hdev->uuids);
1495
55ed8ca1
JH
1496 INIT_LIST_HEAD(&hdev->link_keys);
1497
2763eda6
SJ
1498 INIT_LIST_HEAD(&hdev->remote_oob_data);
1499
76c8686f 1500 INIT_LIST_HEAD(&hdev->adv_entries);
35815085
AG
1501 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1502 (unsigned long) hdev);
76c8686f 1503
ab81cbf9 1504 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1505 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1506
16ab91ab
JH
1507 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1508
1da177e4
LT
1509 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1510
1511 atomic_set(&hdev->promisc, 0);
1512
1513 write_unlock_bh(&hci_dev_list_lock);
1514
f48fd9c8 1515 hdev->workqueue = create_singlethread_workqueue(hdev->name);
33ca954d
DH
1516 if (!hdev->workqueue) {
1517 error = -ENOMEM;
1518 goto err;
1519 }
f48fd9c8 1520
33ca954d
DH
1521 error = hci_add_sysfs(hdev);
1522 if (error < 0)
1523 goto err_wqueue;
1da177e4 1524
611b30f7
MH
1525 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1526 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1527 if (hdev->rfkill) {
1528 if (rfkill_register(hdev->rfkill) < 0) {
1529 rfkill_destroy(hdev->rfkill);
1530 hdev->rfkill = NULL;
1531 }
1532 }
1533
ab81cbf9
JH
1534 set_bit(HCI_AUTO_OFF, &hdev->flags);
1535 set_bit(HCI_SETUP, &hdev->flags);
1536 queue_work(hdev->workqueue, &hdev->power_on);
1537
1da177e4
LT
1538 hci_notify(hdev, HCI_DEV_REG);
1539
1540 return id;
f48fd9c8 1541
33ca954d
DH
1542err_wqueue:
1543 destroy_workqueue(hdev->workqueue);
1544err:
f48fd9c8
MH
1545 write_lock_bh(&hci_dev_list_lock);
1546 list_del(&hdev->list);
1547 write_unlock_bh(&hci_dev_list_lock);
1548
33ca954d 1549 return error;
1da177e4
LT
1550}
1551EXPORT_SYMBOL(hci_register_dev);
1552
1553/* Unregister HCI device */
59735631 1554void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1555{
ef222013
MH
1556 int i;
1557
c13854ce 1558 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1559
1da177e4
LT
1560 write_lock_bh(&hci_dev_list_lock);
1561 list_del(&hdev->list);
1562 write_unlock_bh(&hci_dev_list_lock);
1563
1564 hci_dev_do_close(hdev);
1565
cd4c5391 1566 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1567 kfree_skb(hdev->reassembly[i]);
1568
ab81cbf9 1569 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86
JH
1570 !test_bit(HCI_SETUP, &hdev->flags)) {
1571 hci_dev_lock_bh(hdev);
744cf19e 1572 mgmt_index_removed(hdev);
56e5cb86
JH
1573 hci_dev_unlock_bh(hdev);
1574 }
ab81cbf9 1575
2e58ef3e
JH
1576 /* mgmt_index_removed should take care of emptying the
1577 * pending list */
1578 BUG_ON(!list_empty(&hdev->mgmt_pending));
1579
1da177e4
LT
1580 hci_notify(hdev, HCI_DEV_UNREG);
1581
611b30f7
MH
1582 if (hdev->rfkill) {
1583 rfkill_unregister(hdev->rfkill);
1584 rfkill_destroy(hdev->rfkill);
1585 }
1586
ce242970 1587 hci_del_sysfs(hdev);
147e2d59 1588
35815085 1589 del_timer(&hdev->adv_timer);
c6f3c5f7 1590
f48fd9c8
MH
1591 destroy_workqueue(hdev->workqueue);
1592
e2e0cacb
JH
1593 hci_dev_lock_bh(hdev);
1594 hci_blacklist_clear(hdev);
2aeb9a1a 1595 hci_uuids_clear(hdev);
55ed8ca1 1596 hci_link_keys_clear(hdev);
2763eda6 1597 hci_remote_oob_data_clear(hdev);
76c8686f 1598 hci_adv_entries_clear(hdev);
e2e0cacb
JH
1599 hci_dev_unlock_bh(hdev);
1600
1da177e4 1601 __hci_dev_put(hdev);
1da177e4
LT
1602}
1603EXPORT_SYMBOL(hci_unregister_dev);
1604
1605/* Suspend HCI device */
1606int hci_suspend_dev(struct hci_dev *hdev)
1607{
1608 hci_notify(hdev, HCI_DEV_SUSPEND);
1609 return 0;
1610}
1611EXPORT_SYMBOL(hci_suspend_dev);
1612
1613/* Resume HCI device */
1614int hci_resume_dev(struct hci_dev *hdev)
1615{
1616 hci_notify(hdev, HCI_DEV_RESUME);
1617 return 0;
1618}
1619EXPORT_SYMBOL(hci_resume_dev);
1620
76bca880
MH
1621/* Receive frame from HCI drivers */
1622int hci_recv_frame(struct sk_buff *skb)
1623{
1624 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1625 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1626 && !test_bit(HCI_INIT, &hdev->flags))) {
1627 kfree_skb(skb);
1628 return -ENXIO;
1629 }
1630
1631 /* Incomming skb */
1632 bt_cb(skb)->incoming = 1;
1633
1634 /* Time stamp */
1635 __net_timestamp(skb);
1636
1637 /* Queue frame for rx task */
1638 skb_queue_tail(&hdev->rx_q, skb);
c78ae283
MH
1639 tasklet_schedule(&hdev->rx_task);
1640
76bca880
MH
1641 return 0;
1642}
1643EXPORT_SYMBOL(hci_recv_frame);
1644
33e882a5 1645static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1646 int count, __u8 index)
33e882a5
SS
1647{
1648 int len = 0;
1649 int hlen = 0;
1650 int remain = count;
1651 struct sk_buff *skb;
1652 struct bt_skb_cb *scb;
1653
1654 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1655 index >= NUM_REASSEMBLY)
1656 return -EILSEQ;
1657
1658 skb = hdev->reassembly[index];
1659
1660 if (!skb) {
1661 switch (type) {
1662 case HCI_ACLDATA_PKT:
1663 len = HCI_MAX_FRAME_SIZE;
1664 hlen = HCI_ACL_HDR_SIZE;
1665 break;
1666 case HCI_EVENT_PKT:
1667 len = HCI_MAX_EVENT_SIZE;
1668 hlen = HCI_EVENT_HDR_SIZE;
1669 break;
1670 case HCI_SCODATA_PKT:
1671 len = HCI_MAX_SCO_SIZE;
1672 hlen = HCI_SCO_HDR_SIZE;
1673 break;
1674 }
1675
1e429f38 1676 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1677 if (!skb)
1678 return -ENOMEM;
1679
1680 scb = (void *) skb->cb;
1681 scb->expect = hlen;
1682 scb->pkt_type = type;
1683
1684 skb->dev = (void *) hdev;
1685 hdev->reassembly[index] = skb;
1686 }
1687
1688 while (count) {
1689 scb = (void *) skb->cb;
1690 len = min(scb->expect, (__u16)count);
1691
1692 memcpy(skb_put(skb, len), data, len);
1693
1694 count -= len;
1695 data += len;
1696 scb->expect -= len;
1697 remain = count;
1698
1699 switch (type) {
1700 case HCI_EVENT_PKT:
1701 if (skb->len == HCI_EVENT_HDR_SIZE) {
1702 struct hci_event_hdr *h = hci_event_hdr(skb);
1703 scb->expect = h->plen;
1704
1705 if (skb_tailroom(skb) < scb->expect) {
1706 kfree_skb(skb);
1707 hdev->reassembly[index] = NULL;
1708 return -ENOMEM;
1709 }
1710 }
1711 break;
1712
1713 case HCI_ACLDATA_PKT:
1714 if (skb->len == HCI_ACL_HDR_SIZE) {
1715 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1716 scb->expect = __le16_to_cpu(h->dlen);
1717
1718 if (skb_tailroom(skb) < scb->expect) {
1719 kfree_skb(skb);
1720 hdev->reassembly[index] = NULL;
1721 return -ENOMEM;
1722 }
1723 }
1724 break;
1725
1726 case HCI_SCODATA_PKT:
1727 if (skb->len == HCI_SCO_HDR_SIZE) {
1728 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1729 scb->expect = h->dlen;
1730
1731 if (skb_tailroom(skb) < scb->expect) {
1732 kfree_skb(skb);
1733 hdev->reassembly[index] = NULL;
1734 return -ENOMEM;
1735 }
1736 }
1737 break;
1738 }
1739
1740 if (scb->expect == 0) {
1741 /* Complete frame */
1742
1743 bt_cb(skb)->pkt_type = type;
1744 hci_recv_frame(skb);
1745
1746 hdev->reassembly[index] = NULL;
1747 return remain;
1748 }
1749 }
1750
1751 return remain;
1752}
1753
ef222013
MH
1754int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1755{
f39a3c06
SS
1756 int rem = 0;
1757
ef222013
MH
1758 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1759 return -EILSEQ;
1760
da5f6c37 1761 while (count) {
1e429f38 1762 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1763 if (rem < 0)
1764 return rem;
ef222013 1765
f39a3c06
SS
1766 data += (count - rem);
1767 count = rem;
f81c6224 1768 }
ef222013 1769
f39a3c06 1770 return rem;
ef222013
MH
1771}
1772EXPORT_SYMBOL(hci_recv_fragment);
1773
99811510
SS
1774#define STREAM_REASSEMBLY 0
1775
1776int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1777{
1778 int type;
1779 int rem = 0;
1780
da5f6c37 1781 while (count) {
99811510
SS
1782 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1783
1784 if (!skb) {
1785 struct { char type; } *pkt;
1786
1787 /* Start of the frame */
1788 pkt = data;
1789 type = pkt->type;
1790
1791 data++;
1792 count--;
1793 } else
1794 type = bt_cb(skb)->pkt_type;
1795
1e429f38
GP
1796 rem = hci_reassembly(hdev, type, data, count,
1797 STREAM_REASSEMBLY);
99811510
SS
1798 if (rem < 0)
1799 return rem;
1800
1801 data += (count - rem);
1802 count = rem;
f81c6224 1803 }
99811510
SS
1804
1805 return rem;
1806}
1807EXPORT_SYMBOL(hci_recv_stream_fragment);
1808
1da177e4
LT
1809/* ---- Interface to upper protocols ---- */
1810
1811/* Register/Unregister protocols.
1812 * hci_task_lock is used to ensure that no tasks are running. */
1813int hci_register_proto(struct hci_proto *hp)
1814{
1815 int err = 0;
1816
1817 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1818
1819 if (hp->id >= HCI_MAX_PROTO)
1820 return -EINVAL;
1821
1822 write_lock_bh(&hci_task_lock);
1823
1824 if (!hci_proto[hp->id])
1825 hci_proto[hp->id] = hp;
1826 else
1827 err = -EEXIST;
1828
1829 write_unlock_bh(&hci_task_lock);
1830
1831 return err;
1832}
1833EXPORT_SYMBOL(hci_register_proto);
1834
1835int hci_unregister_proto(struct hci_proto *hp)
1836{
1837 int err = 0;
1838
1839 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1840
1841 if (hp->id >= HCI_MAX_PROTO)
1842 return -EINVAL;
1843
1844 write_lock_bh(&hci_task_lock);
1845
1846 if (hci_proto[hp->id])
1847 hci_proto[hp->id] = NULL;
1848 else
1849 err = -ENOENT;
1850
1851 write_unlock_bh(&hci_task_lock);
1852
1853 return err;
1854}
1855EXPORT_SYMBOL(hci_unregister_proto);
1856
1857int hci_register_cb(struct hci_cb *cb)
1858{
1859 BT_DBG("%p name %s", cb, cb->name);
1860
1861 write_lock_bh(&hci_cb_list_lock);
1862 list_add(&cb->list, &hci_cb_list);
1863 write_unlock_bh(&hci_cb_list_lock);
1864
1865 return 0;
1866}
1867EXPORT_SYMBOL(hci_register_cb);
1868
1869int hci_unregister_cb(struct hci_cb *cb)
1870{
1871 BT_DBG("%p name %s", cb, cb->name);
1872
1873 write_lock_bh(&hci_cb_list_lock);
1874 list_del(&cb->list);
1875 write_unlock_bh(&hci_cb_list_lock);
1876
1877 return 0;
1878}
1879EXPORT_SYMBOL(hci_unregister_cb);
1880
1881static int hci_send_frame(struct sk_buff *skb)
1882{
1883 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1884
1885 if (!hdev) {
1886 kfree_skb(skb);
1887 return -ENODEV;
1888 }
1889
0d48d939 1890 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1891
1892 if (atomic_read(&hdev->promisc)) {
1893 /* Time stamp */
a61bbcf2 1894 __net_timestamp(skb);
1da177e4 1895
eec8d2bc 1896 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1897 }
1898
1899 /* Get rid of skb owner, prior to sending to the driver. */
1900 skb_orphan(skb);
1901
1902 return hdev->send(skb);
1903}
1904
1905/* Send HCI command */
a9de9248 1906int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1907{
1908 int len = HCI_COMMAND_HDR_SIZE + plen;
1909 struct hci_command_hdr *hdr;
1910 struct sk_buff *skb;
1911
a9de9248 1912 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1913
1914 skb = bt_skb_alloc(len, GFP_ATOMIC);
1915 if (!skb) {
ef222013 1916 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1917 return -ENOMEM;
1918 }
1919
1920 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1921 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1922 hdr->plen = plen;
1923
1924 if (plen)
1925 memcpy(skb_put(skb, plen), param, plen);
1926
1927 BT_DBG("skb len %d", skb->len);
1928
0d48d939 1929 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1930 skb->dev = (void *) hdev;
c78ae283 1931
a5040efa
JH
1932 if (test_bit(HCI_INIT, &hdev->flags))
1933 hdev->init_last_cmd = opcode;
1934
1da177e4 1935 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 1936 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
1937
1938 return 0;
1939}
1da177e4
LT
1940
1941/* Get data from the previously sent command */
a9de9248 1942void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1943{
1944 struct hci_command_hdr *hdr;
1945
1946 if (!hdev->sent_cmd)
1947 return NULL;
1948
1949 hdr = (void *) hdev->sent_cmd->data;
1950
a9de9248 1951 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1952 return NULL;
1953
a9de9248 1954 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1955
1956 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1957}
1958
1959/* Send ACL data */
1960static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1961{
1962 struct hci_acl_hdr *hdr;
1963 int len = skb->len;
1964
badff6d0
ACM
1965 skb_push(skb, HCI_ACL_HDR_SIZE);
1966 skb_reset_transport_header(skb);
9c70220b 1967 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1968 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1969 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1970}
1971
73d80deb
LAD
1972static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1973 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1974{
1975 struct hci_dev *hdev = conn->hdev;
1976 struct sk_buff *list;
1977
70f23020
AE
1978 list = skb_shinfo(skb)->frag_list;
1979 if (!list) {
1da177e4
LT
1980 /* Non fragmented */
1981 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1982
73d80deb 1983 skb_queue_tail(queue, skb);
1da177e4
LT
1984 } else {
1985 /* Fragmented */
1986 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1987
1988 skb_shinfo(skb)->frag_list = NULL;
1989
1990 /* Queue all fragments atomically */
73d80deb 1991 spin_lock_bh(&queue->lock);
1da177e4 1992
73d80deb 1993 __skb_queue_tail(queue, skb);
e702112f
AE
1994
1995 flags &= ~ACL_START;
1996 flags |= ACL_CONT;
1da177e4
LT
1997 do {
1998 skb = list; list = list->next;
8e87d142 1999
1da177e4 2000 skb->dev = (void *) hdev;
0d48d939 2001 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2002 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2003
2004 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2005
73d80deb 2006 __skb_queue_tail(queue, skb);
1da177e4
LT
2007 } while (list);
2008
73d80deb 2009 spin_unlock_bh(&queue->lock);
1da177e4 2010 }
73d80deb
LAD
2011}
2012
2013void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2014{
2015 struct hci_conn *conn = chan->conn;
2016 struct hci_dev *hdev = conn->hdev;
2017
2018 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2019
2020 skb->dev = (void *) hdev;
2021 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2022 hci_add_acl_hdr(skb, conn->handle, flags);
2023
2024 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2025
c78ae283 2026 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2027}
2028EXPORT_SYMBOL(hci_send_acl);
2029
2030/* Send SCO data */
0d861d8b 2031void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2032{
2033 struct hci_dev *hdev = conn->hdev;
2034 struct hci_sco_hdr hdr;
2035
2036 BT_DBG("%s len %d", hdev->name, skb->len);
2037
aca3192c 2038 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2039 hdr.dlen = skb->len;
2040
badff6d0
ACM
2041 skb_push(skb, HCI_SCO_HDR_SIZE);
2042 skb_reset_transport_header(skb);
9c70220b 2043 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2044
2045 skb->dev = (void *) hdev;
0d48d939 2046 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2047
1da177e4 2048 skb_queue_tail(&conn->data_q, skb);
c78ae283 2049 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2050}
2051EXPORT_SYMBOL(hci_send_sco);
2052
2053/* ---- HCI TX task (outgoing data) ---- */
2054
2055/* HCI Connection scheduler */
2056static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2057{
2058 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2059 struct hci_conn *conn = NULL, *c;
1da177e4 2060 int num = 0, min = ~0;
1da177e4 2061
8e87d142 2062 /* We don't have to lock device here. Connections are always
1da177e4 2063 * added and removed with TX task disabled. */
8035ded4 2064 list_for_each_entry(c, &h->list, list) {
769be974 2065 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2066 continue;
769be974
MH
2067
2068 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2069 continue;
2070
1da177e4
LT
2071 num++;
2072
2073 if (c->sent < min) {
2074 min = c->sent;
2075 conn = c;
2076 }
52087a79
LAD
2077
2078 if (hci_conn_num(hdev, type) == num)
2079 break;
1da177e4
LT
2080 }
2081
2082 if (conn) {
6ed58ec5
VT
2083 int cnt, q;
2084
2085 switch (conn->type) {
2086 case ACL_LINK:
2087 cnt = hdev->acl_cnt;
2088 break;
2089 case SCO_LINK:
2090 case ESCO_LINK:
2091 cnt = hdev->sco_cnt;
2092 break;
2093 case LE_LINK:
2094 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2095 break;
2096 default:
2097 cnt = 0;
2098 BT_ERR("Unknown link type");
2099 }
2100
2101 q = cnt / num;
1da177e4
LT
2102 *quote = q ? q : 1;
2103 } else
2104 *quote = 0;
2105
2106 BT_DBG("conn %p quote %d", conn, *quote);
2107 return conn;
2108}
2109
bae1f5d9 2110static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2111{
2112 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2113 struct hci_conn *c;
1da177e4 2114
bae1f5d9 2115 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
2116
2117 /* Kill stalled connections */
8035ded4 2118 list_for_each_entry(c, &h->list, list) {
bae1f5d9
VT
2119 if (c->type == type && c->sent) {
2120 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2121 hdev->name, batostr(&c->dst));
2122 hci_acl_disconn(c, 0x13);
2123 }
2124 }
2125}
2126
73d80deb
LAD
2127static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2128 int *quote)
1da177e4 2129{
73d80deb
LAD
2130 struct hci_conn_hash *h = &hdev->conn_hash;
2131 struct hci_chan *chan = NULL;
2132 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2133 struct hci_conn *conn;
73d80deb
LAD
2134 int cnt, q, conn_num = 0;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 list_for_each_entry(conn, &h->list, list) {
2139 struct hci_chan_hash *ch;
2140 struct hci_chan *tmp;
2141
2142 if (conn->type != type)
2143 continue;
2144
2145 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2146 continue;
2147
2148 conn_num++;
2149
2150 ch = &conn->chan_hash;
2151
2152 list_for_each_entry(tmp, &ch->list, list) {
2153 struct sk_buff *skb;
2154
2155 if (skb_queue_empty(&tmp->data_q))
2156 continue;
2157
2158 skb = skb_peek(&tmp->data_q);
2159 if (skb->priority < cur_prio)
2160 continue;
2161
2162 if (skb->priority > cur_prio) {
2163 num = 0;
2164 min = ~0;
2165 cur_prio = skb->priority;
2166 }
2167
2168 num++;
2169
2170 if (conn->sent < min) {
2171 min = conn->sent;
2172 chan = tmp;
2173 }
2174 }
2175
2176 if (hci_conn_num(hdev, type) == conn_num)
2177 break;
2178 }
2179
2180 if (!chan)
2181 return NULL;
2182
2183 switch (chan->conn->type) {
2184 case ACL_LINK:
2185 cnt = hdev->acl_cnt;
2186 break;
2187 case SCO_LINK:
2188 case ESCO_LINK:
2189 cnt = hdev->sco_cnt;
2190 break;
2191 case LE_LINK:
2192 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2193 break;
2194 default:
2195 cnt = 0;
2196 BT_ERR("Unknown link type");
2197 }
2198
2199 q = cnt / num;
2200 *quote = q ? q : 1;
2201 BT_DBG("chan %p quote %d", chan, *quote);
2202 return chan;
2203}
2204
02b20f0b
LAD
2205static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2206{
2207 struct hci_conn_hash *h = &hdev->conn_hash;
2208 struct hci_conn *conn;
2209 int num = 0;
2210
2211 BT_DBG("%s", hdev->name);
2212
2213 list_for_each_entry(conn, &h->list, list) {
2214 struct hci_chan_hash *ch;
2215 struct hci_chan *chan;
2216
2217 if (conn->type != type)
2218 continue;
2219
2220 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221 continue;
2222
2223 num++;
2224
2225 ch = &conn->chan_hash;
2226 list_for_each_entry(chan, &ch->list, list) {
2227 struct sk_buff *skb;
2228
2229 if (chan->sent) {
2230 chan->sent = 0;
2231 continue;
2232 }
2233
2234 if (skb_queue_empty(&chan->data_q))
2235 continue;
2236
2237 skb = skb_peek(&chan->data_q);
2238 if (skb->priority >= HCI_PRIO_MAX - 1)
2239 continue;
2240
2241 skb->priority = HCI_PRIO_MAX - 1;
2242
2243 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2244 skb->priority);
2245 }
2246
2247 if (hci_conn_num(hdev, type) == num)
2248 break;
2249 }
2250}
2251
73d80deb
LAD
2252static inline void hci_sched_acl(struct hci_dev *hdev)
2253{
2254 struct hci_chan *chan;
1da177e4
LT
2255 struct sk_buff *skb;
2256 int quote;
73d80deb 2257 unsigned int cnt;
1da177e4
LT
2258
2259 BT_DBG("%s", hdev->name);
2260
52087a79
LAD
2261 if (!hci_conn_num(hdev, ACL_LINK))
2262 return;
2263
1da177e4
LT
2264 if (!test_bit(HCI_RAW, &hdev->flags)) {
2265 /* ACL tx timeout must be longer than maximum
2266 * link supervision timeout (40.9 seconds) */
82453021 2267 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2268 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2269 }
2270
73d80deb 2271 cnt = hdev->acl_cnt;
04837f64 2272
73d80deb
LAD
2273 while (hdev->acl_cnt &&
2274 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2275 u32 priority = (skb_peek(&chan->data_q))->priority;
2276 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2277 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278 skb->len, skb->priority);
2279
ec1cce24
LAD
2280 /* Stop if priority has changed */
2281 if (skb->priority < priority)
2282 break;
2283
2284 skb = skb_dequeue(&chan->data_q);
2285
73d80deb
LAD
2286 hci_conn_enter_active_mode(chan->conn,
2287 bt_cb(skb)->force_active);
04837f64 2288
1da177e4
LT
2289 hci_send_frame(skb);
2290 hdev->acl_last_tx = jiffies;
2291
2292 hdev->acl_cnt--;
73d80deb
LAD
2293 chan->sent++;
2294 chan->conn->sent++;
1da177e4
LT
2295 }
2296 }
02b20f0b
LAD
2297
2298 if (cnt != hdev->acl_cnt)
2299 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2300}
2301
2302/* Schedule SCO */
2303static inline void hci_sched_sco(struct hci_dev *hdev)
2304{
2305 struct hci_conn *conn;
2306 struct sk_buff *skb;
2307 int quote;
2308
2309 BT_DBG("%s", hdev->name);
2310
52087a79
LAD
2311 if (!hci_conn_num(hdev, SCO_LINK))
2312 return;
2313
1da177e4
LT
2314 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2315 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2316 BT_DBG("skb %p len %d", skb, skb->len);
2317 hci_send_frame(skb);
2318
2319 conn->sent++;
2320 if (conn->sent == ~0)
2321 conn->sent = 0;
2322 }
2323 }
2324}
2325
b6a0dc82
MH
2326static inline void hci_sched_esco(struct hci_dev *hdev)
2327{
2328 struct hci_conn *conn;
2329 struct sk_buff *skb;
2330 int quote;
2331
2332 BT_DBG("%s", hdev->name);
2333
52087a79
LAD
2334 if (!hci_conn_num(hdev, ESCO_LINK))
2335 return;
2336
b6a0dc82
MH
2337 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339 BT_DBG("skb %p len %d", skb, skb->len);
2340 hci_send_frame(skb);
2341
2342 conn->sent++;
2343 if (conn->sent == ~0)
2344 conn->sent = 0;
2345 }
2346 }
2347}
2348
6ed58ec5
VT
2349static inline void hci_sched_le(struct hci_dev *hdev)
2350{
73d80deb 2351 struct hci_chan *chan;
6ed58ec5 2352 struct sk_buff *skb;
02b20f0b 2353 int quote, cnt, tmp;
6ed58ec5
VT
2354
2355 BT_DBG("%s", hdev->name);
2356
52087a79
LAD
2357 if (!hci_conn_num(hdev, LE_LINK))
2358 return;
2359
6ed58ec5
VT
2360 if (!test_bit(HCI_RAW, &hdev->flags)) {
2361 /* LE tx timeout must be longer than maximum
2362 * link supervision timeout (40.9 seconds) */
bae1f5d9 2363 if (!hdev->le_cnt && hdev->le_pkts &&
6ed58ec5 2364 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2365 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2366 }
2367
2368 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2369 tmp = cnt;
73d80deb 2370 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2371 u32 priority = (skb_peek(&chan->data_q))->priority;
2372 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2373 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2374 skb->len, skb->priority);
6ed58ec5 2375
ec1cce24
LAD
2376 /* Stop if priority has changed */
2377 if (skb->priority < priority)
2378 break;
2379
2380 skb = skb_dequeue(&chan->data_q);
2381
6ed58ec5
VT
2382 hci_send_frame(skb);
2383 hdev->le_last_tx = jiffies;
2384
2385 cnt--;
73d80deb
LAD
2386 chan->sent++;
2387 chan->conn->sent++;
6ed58ec5
VT
2388 }
2389 }
73d80deb 2390
6ed58ec5
VT
2391 if (hdev->le_pkts)
2392 hdev->le_cnt = cnt;
2393 else
2394 hdev->acl_cnt = cnt;
02b20f0b
LAD
2395
2396 if (cnt != tmp)
2397 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2398}
2399
1da177e4
LT
2400static void hci_tx_task(unsigned long arg)
2401{
2402 struct hci_dev *hdev = (struct hci_dev *) arg;
2403 struct sk_buff *skb;
2404
2405 read_lock(&hci_task_lock);
2406
6ed58ec5
VT
2407 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2408 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2409
2410 /* Schedule queues and send stuff to HCI driver */
2411
2412 hci_sched_acl(hdev);
2413
2414 hci_sched_sco(hdev);
2415
b6a0dc82
MH
2416 hci_sched_esco(hdev);
2417
6ed58ec5
VT
2418 hci_sched_le(hdev);
2419
1da177e4
LT
2420 /* Send next queued raw (unknown type) packet */
2421 while ((skb = skb_dequeue(&hdev->raw_q)))
2422 hci_send_frame(skb);
2423
2424 read_unlock(&hci_task_lock);
2425}
2426
25985edc 2427/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2428
2429/* ACL data packet */
2430static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2431{
2432 struct hci_acl_hdr *hdr = (void *) skb->data;
2433 struct hci_conn *conn;
2434 __u16 handle, flags;
2435
2436 skb_pull(skb, HCI_ACL_HDR_SIZE);
2437
2438 handle = __le16_to_cpu(hdr->handle);
2439 flags = hci_flags(handle);
2440 handle = hci_handle(handle);
2441
2442 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2443
2444 hdev->stat.acl_rx++;
2445
2446 hci_dev_lock(hdev);
2447 conn = hci_conn_hash_lookup_handle(hdev, handle);
2448 hci_dev_unlock(hdev);
8e87d142 2449
1da177e4
LT
2450 if (conn) {
2451 register struct hci_proto *hp;
2452
14b12d0b 2453 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
04837f64 2454
1da177e4 2455 /* Send to upper protocol */
70f23020
AE
2456 hp = hci_proto[HCI_PROTO_L2CAP];
2457 if (hp && hp->recv_acldata) {
1da177e4
LT
2458 hp->recv_acldata(conn, skb, flags);
2459 return;
2460 }
2461 } else {
8e87d142 2462 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2463 hdev->name, handle);
2464 }
2465
2466 kfree_skb(skb);
2467}
2468
2469/* SCO data packet */
2470static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2471{
2472 struct hci_sco_hdr *hdr = (void *) skb->data;
2473 struct hci_conn *conn;
2474 __u16 handle;
2475
2476 skb_pull(skb, HCI_SCO_HDR_SIZE);
2477
2478 handle = __le16_to_cpu(hdr->handle);
2479
2480 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2481
2482 hdev->stat.sco_rx++;
2483
2484 hci_dev_lock(hdev);
2485 conn = hci_conn_hash_lookup_handle(hdev, handle);
2486 hci_dev_unlock(hdev);
2487
2488 if (conn) {
2489 register struct hci_proto *hp;
2490
2491 /* Send to upper protocol */
70f23020
AE
2492 hp = hci_proto[HCI_PROTO_SCO];
2493 if (hp && hp->recv_scodata) {
1da177e4
LT
2494 hp->recv_scodata(conn, skb);
2495 return;
2496 }
2497 } else {
8e87d142 2498 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2499 hdev->name, handle);
2500 }
2501
2502 kfree_skb(skb);
2503}
2504
6516455d 2505static void hci_rx_task(unsigned long arg)
1da177e4
LT
2506{
2507 struct hci_dev *hdev = (struct hci_dev *) arg;
2508 struct sk_buff *skb;
2509
2510 BT_DBG("%s", hdev->name);
2511
2512 read_lock(&hci_task_lock);
2513
2514 while ((skb = skb_dequeue(&hdev->rx_q))) {
2515 if (atomic_read(&hdev->promisc)) {
2516 /* Send copy to the sockets */
eec8d2bc 2517 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2518 }
2519
2520 if (test_bit(HCI_RAW, &hdev->flags)) {
2521 kfree_skb(skb);
2522 continue;
2523 }
2524
2525 if (test_bit(HCI_INIT, &hdev->flags)) {
2526 /* Don't process data packets in this states. */
0d48d939 2527 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2528 case HCI_ACLDATA_PKT:
2529 case HCI_SCODATA_PKT:
2530 kfree_skb(skb);
2531 continue;
3ff50b79 2532 }
1da177e4
LT
2533 }
2534
2535 /* Process frame */
0d48d939 2536 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2537 case HCI_EVENT_PKT:
2538 hci_event_packet(hdev, skb);
2539 break;
2540
2541 case HCI_ACLDATA_PKT:
2542 BT_DBG("%s ACL data packet", hdev->name);
2543 hci_acldata_packet(hdev, skb);
2544 break;
2545
2546 case HCI_SCODATA_PKT:
2547 BT_DBG("%s SCO data packet", hdev->name);
2548 hci_scodata_packet(hdev, skb);
2549 break;
2550
2551 default:
2552 kfree_skb(skb);
2553 break;
2554 }
2555 }
2556
2557 read_unlock(&hci_task_lock);
2558}
2559
2560static void hci_cmd_task(unsigned long arg)
2561{
2562 struct hci_dev *hdev = (struct hci_dev *) arg;
2563 struct sk_buff *skb;
2564
2565 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2566
1da177e4 2567 /* Send queued commands */
5a08ecce
AE
2568 if (atomic_read(&hdev->cmd_cnt)) {
2569 skb = skb_dequeue(&hdev->cmd_q);
2570 if (!skb)
2571 return;
2572
7585b97a 2573 kfree_skb(hdev->sent_cmd);
1da177e4 2574
70f23020
AE
2575 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2576 if (hdev->sent_cmd) {
1da177e4
LT
2577 atomic_dec(&hdev->cmd_cnt);
2578 hci_send_frame(skb);
7bdb8a5c
SJ
2579 if (test_bit(HCI_RESET, &hdev->flags))
2580 del_timer(&hdev->cmd_timer);
2581 else
2582 mod_timer(&hdev->cmd_timer,
6bd32326 2583 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2584 } else {
2585 skb_queue_head(&hdev->cmd_q, skb);
c78ae283 2586 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
2587 }
2588 }
2589}
2519a1fc
AG
2590
2591int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2592{
2593 /* General inquiry access code (GIAC) */
2594 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2595 struct hci_cp_inquiry cp;
2596
2597 BT_DBG("%s", hdev->name);
2598
2599 if (test_bit(HCI_INQUIRY, &hdev->flags))
2600 return -EINPROGRESS;
2601
2602 memset(&cp, 0, sizeof(cp));
2603 memcpy(&cp.lap, lap, sizeof(cp.lap));
2604 cp.length = length;
2605
2606 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2607}
023d5049
AG
2608
2609int hci_cancel_inquiry(struct hci_dev *hdev)
2610{
2611 BT_DBG("%s", hdev->name);
2612
2613 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2614 return -EPERM;
2615
2616 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2617}