Bluetooth: Move Extended Inquiry Response defines to hci.h
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
7784d78f
AE
58int enable_hs;
59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
e041c683 79 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
85}
86
6516455d 87static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 88{
e041c683 89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
90}
91
92/* ---- HCI requests ---- */
93
23bb5763 94void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 95{
23bb5763
JH
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
a5040efa
JH
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 102 return;
1da177e4
LT
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
8e87d142 123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 124 unsigned long opt, __u32 timeout)
1da177e4
LT
125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
e175072f 146 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
3ff50b79 156 }
1da177e4 157
a5040efa 158 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 166 unsigned long opt, __u32 timeout)
1da177e4
LT
167{
168 int ret;
169
7c6a329e
MH
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
1da177e4
LT
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
f630cf0d 186 set_bit(HCI_RESET, &hdev->flags);
a9de9248 187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
188}
189
e61ef499 190static void bredr_init(struct hci_dev *hdev)
1da177e4 191{
b0916ea0 192 struct hci_cp_delete_stored_link_key cp;
1ebb9252 193 __le16 param;
89f2783d 194 __u8 flt_type;
1da177e4 195
2455a3ea
AE
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
1da177e4
LT
198 /* Mandatory initialization */
199
200 /* Reset */
f630cf0d 201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 204 }
1da177e4
LT
205
206 /* Read Local Supported Features */
a9de9248 207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 208
1143e5a6 209 /* Read Local Version */
a9de9248 210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 211
1da177e4 212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 214
1da177e4 215 /* Read BD Address */
a9de9248
MH
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
223
224 /* Read Voice Setting */
a9de9248 225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
89f2783d 230 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 232
1da177e4 233 /* Connection accept timeout ~20 secs */
aca3192c 234 param = cpu_to_le16(0x7d00);
a9de9248 235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
240}
241
e61ef499
AE
242static void amp_init(struct hci_dev *hdev)
243{
2455a3ea
AE
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
e61ef499
AE
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
6ed58ec5
VT
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
1da177e4
LT
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
a9de9248 302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
a9de9248 312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
e4e8e37c 321 /* Encryption */
a9de9248 322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
323}
324
e4e8e37c
MH
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
a418b893 329 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
b57c1a56 360 struct inquiry_entry *p, *n;
1da177e4 361
b57c1a56
JH
362 list_for_each_entry_safe(p, n, &hdev->inq_cache.list, list) {
363 list_del(&p->list);
364 kfree(p);
1da177e4
LT
365 }
366}
367
368struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_entry *e;
372
373 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
374
b57c1a56 375 list_for_each_entry(e, &cache->list, list) {
1da177e4 376 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
377 return e;
378 }
379
380 return NULL;
1da177e4
LT
381}
382
383void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
384{
385 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 386 struct inquiry_entry *ie;
1da177e4
LT
387
388 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
389
70f23020
AE
390 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
391 if (!ie) {
1da177e4 392 /* Entry not in the cache. Add new one. */
70f23020
AE
393 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
394 if (!ie)
1da177e4 395 return;
70f23020 396
b57c1a56 397 list_add(&ie->list, &cache->list);
1da177e4
LT
398 }
399
70f23020
AE
400 memcpy(&ie->data, data, sizeof(*data));
401 ie->timestamp = jiffies;
1da177e4
LT
402 cache->timestamp = jiffies;
403}
404
405static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
406{
407 struct inquiry_cache *cache = &hdev->inq_cache;
408 struct inquiry_info *info = (struct inquiry_info *) buf;
409 struct inquiry_entry *e;
410 int copied = 0;
411
b57c1a56 412 list_for_each_entry(e, &cache->list, list) {
1da177e4 413 struct inquiry_data *data = &e->data;
b57c1a56
JH
414
415 if (copied >= num)
416 break;
417
1da177e4
LT
418 bacpy(&info->bdaddr, &data->bdaddr);
419 info->pscan_rep_mode = data->pscan_rep_mode;
420 info->pscan_period_mode = data->pscan_period_mode;
421 info->pscan_mode = data->pscan_mode;
422 memcpy(info->dev_class, data->dev_class, 3);
423 info->clock_offset = data->clock_offset;
b57c1a56 424
1da177e4 425 info++;
b57c1a56 426 copied++;
1da177e4
LT
427 }
428
429 BT_DBG("cache %p, copied %d", cache, copied);
430 return copied;
431}
432
433static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
434{
435 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
436 struct hci_cp_inquiry cp;
437
438 BT_DBG("%s", hdev->name);
439
440 if (test_bit(HCI_INQUIRY, &hdev->flags))
441 return;
442
443 /* Start Inquiry */
444 memcpy(&cp.lap, &ir->lap, 3);
445 cp.length = ir->length;
446 cp.num_rsp = ir->num_rsp;
a9de9248 447 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
448}
449
450int hci_inquiry(void __user *arg)
451{
452 __u8 __user *ptr = arg;
453 struct hci_inquiry_req ir;
454 struct hci_dev *hdev;
455 int err = 0, do_inquiry = 0, max_rsp;
456 long timeo;
457 __u8 *buf;
458
459 if (copy_from_user(&ir, ptr, sizeof(ir)))
460 return -EFAULT;
461
5a08ecce
AE
462 hdev = hci_dev_get(ir.dev_id);
463 if (!hdev)
1da177e4
LT
464 return -ENODEV;
465
09fd0de5 466 hci_dev_lock(hdev);
8e87d142 467 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
468 inquiry_cache_empty(hdev) ||
469 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
470 inquiry_cache_flush(hdev);
471 do_inquiry = 1;
472 }
09fd0de5 473 hci_dev_unlock(hdev);
1da177e4 474
04837f64 475 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
476
477 if (do_inquiry) {
478 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
479 if (err < 0)
480 goto done;
481 }
1da177e4
LT
482
483 /* for unlimited number of responses we will use buffer with 255 entries */
484 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
485
486 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
487 * copy it to the user space.
488 */
01df8c31 489 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 490 if (!buf) {
1da177e4
LT
491 err = -ENOMEM;
492 goto done;
493 }
494
09fd0de5 495 hci_dev_lock(hdev);
1da177e4 496 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 497 hci_dev_unlock(hdev);
1da177e4
LT
498
499 BT_DBG("num_rsp %d", ir.num_rsp);
500
501 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
502 ptr += sizeof(ir);
503 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
504 ir.num_rsp))
505 err = -EFAULT;
8e87d142 506 } else
1da177e4
LT
507 err = -EFAULT;
508
509 kfree(buf);
510
511done:
512 hci_dev_put(hdev);
513 return err;
514}
515
516/* ---- HCI ioctl helpers ---- */
517
518int hci_dev_open(__u16 dev)
519{
520 struct hci_dev *hdev;
521 int ret = 0;
522
5a08ecce
AE
523 hdev = hci_dev_get(dev);
524 if (!hdev)
1da177e4
LT
525 return -ENODEV;
526
527 BT_DBG("%s %p", hdev->name, hdev);
528
529 hci_req_lock(hdev);
530
611b30f7
MH
531 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
532 ret = -ERFKILL;
533 goto done;
534 }
535
1da177e4
LT
536 if (test_bit(HCI_UP, &hdev->flags)) {
537 ret = -EALREADY;
538 goto done;
539 }
540
541 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
542 set_bit(HCI_RAW, &hdev->flags);
543
07e3b94a
AE
544 /* Treat all non BR/EDR controllers as raw devices if
545 enable_hs is not set */
546 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
547 set_bit(HCI_RAW, &hdev->flags);
548
1da177e4
LT
549 if (hdev->open(hdev)) {
550 ret = -EIO;
551 goto done;
552 }
553
554 if (!test_bit(HCI_RAW, &hdev->flags)) {
555 atomic_set(&hdev->cmd_cnt, 1);
556 set_bit(HCI_INIT, &hdev->flags);
a5040efa 557 hdev->init_last_cmd = 0;
1da177e4 558
04837f64
MH
559 ret = __hci_request(hdev, hci_init_req, 0,
560 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 561
eead27da 562 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
563 ret = __hci_request(hdev, hci_le_init_req, 0,
564 msecs_to_jiffies(HCI_INIT_TIMEOUT));
565
1da177e4
LT
566 clear_bit(HCI_INIT, &hdev->flags);
567 }
568
569 if (!ret) {
570 hci_dev_hold(hdev);
571 set_bit(HCI_UP, &hdev->flags);
572 hci_notify(hdev, HCI_DEV_UP);
56e5cb86 573 if (!test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 574 hci_dev_lock(hdev);
744cf19e 575 mgmt_powered(hdev, 1);
09fd0de5 576 hci_dev_unlock(hdev);
56e5cb86 577 }
8e87d142 578 } else {
1da177e4 579 /* Init failed, cleanup */
3eff45ea 580 flush_work(&hdev->tx_work);
c347b765 581 flush_work(&hdev->cmd_work);
b78752cc 582 flush_work(&hdev->rx_work);
1da177e4
LT
583
584 skb_queue_purge(&hdev->cmd_q);
585 skb_queue_purge(&hdev->rx_q);
586
587 if (hdev->flush)
588 hdev->flush(hdev);
589
590 if (hdev->sent_cmd) {
591 kfree_skb(hdev->sent_cmd);
592 hdev->sent_cmd = NULL;
593 }
594
595 hdev->close(hdev);
596 hdev->flags = 0;
597 }
598
599done:
600 hci_req_unlock(hdev);
601 hci_dev_put(hdev);
602 return ret;
603}
604
605static int hci_dev_do_close(struct hci_dev *hdev)
606{
607 BT_DBG("%s %p", hdev->name, hdev);
608
609 hci_req_cancel(hdev, ENODEV);
610 hci_req_lock(hdev);
611
612 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 613 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
614 hci_req_unlock(hdev);
615 return 0;
616 }
617
3eff45ea
GP
618 /* Flush RX and TX works */
619 flush_work(&hdev->tx_work);
b78752cc 620 flush_work(&hdev->rx_work);
1da177e4 621
16ab91ab 622 if (hdev->discov_timeout > 0) {
e0f9309f 623 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
624 hdev->discov_timeout = 0;
625 }
626
3243553f 627 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 628 cancel_delayed_work(&hdev->power_off);
3243553f 629
7d78525d
JH
630 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
631 cancel_delayed_work(&hdev->service_cache);
632
09fd0de5 633 hci_dev_lock(hdev);
1da177e4
LT
634 inquiry_cache_flush(hdev);
635 hci_conn_hash_flush(hdev);
09fd0de5 636 hci_dev_unlock(hdev);
1da177e4
LT
637
638 hci_notify(hdev, HCI_DEV_DOWN);
639
640 if (hdev->flush)
641 hdev->flush(hdev);
642
643 /* Reset device */
644 skb_queue_purge(&hdev->cmd_q);
645 atomic_set(&hdev->cmd_cnt, 1);
646 if (!test_bit(HCI_RAW, &hdev->flags)) {
647 set_bit(HCI_INIT, &hdev->flags);
04837f64 648 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 649 msecs_to_jiffies(250));
1da177e4
LT
650 clear_bit(HCI_INIT, &hdev->flags);
651 }
652
c347b765
GP
653 /* flush cmd work */
654 flush_work(&hdev->cmd_work);
1da177e4
LT
655
656 /* Drop queues */
657 skb_queue_purge(&hdev->rx_q);
658 skb_queue_purge(&hdev->cmd_q);
659 skb_queue_purge(&hdev->raw_q);
660
661 /* Drop last sent command */
662 if (hdev->sent_cmd) {
b79f44c1 663 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
664 kfree_skb(hdev->sent_cmd);
665 hdev->sent_cmd = NULL;
666 }
667
668 /* After this point our queues are empty
669 * and no tasks are scheduled. */
670 hdev->close(hdev);
671
09fd0de5 672 hci_dev_lock(hdev);
744cf19e 673 mgmt_powered(hdev, 0);
09fd0de5 674 hci_dev_unlock(hdev);
5add6af8 675
1da177e4
LT
676 /* Clear flags */
677 hdev->flags = 0;
678
679 hci_req_unlock(hdev);
680
681 hci_dev_put(hdev);
682 return 0;
683}
684
685int hci_dev_close(__u16 dev)
686{
687 struct hci_dev *hdev;
688 int err;
689
70f23020
AE
690 hdev = hci_dev_get(dev);
691 if (!hdev)
1da177e4
LT
692 return -ENODEV;
693 err = hci_dev_do_close(hdev);
694 hci_dev_put(hdev);
695 return err;
696}
697
698int hci_dev_reset(__u16 dev)
699{
700 struct hci_dev *hdev;
701 int ret = 0;
702
70f23020
AE
703 hdev = hci_dev_get(dev);
704 if (!hdev)
1da177e4
LT
705 return -ENODEV;
706
707 hci_req_lock(hdev);
1da177e4
LT
708
709 if (!test_bit(HCI_UP, &hdev->flags))
710 goto done;
711
712 /* Drop queues */
713 skb_queue_purge(&hdev->rx_q);
714 skb_queue_purge(&hdev->cmd_q);
715
09fd0de5 716 hci_dev_lock(hdev);
1da177e4
LT
717 inquiry_cache_flush(hdev);
718 hci_conn_hash_flush(hdev);
09fd0de5 719 hci_dev_unlock(hdev);
1da177e4
LT
720
721 if (hdev->flush)
722 hdev->flush(hdev);
723
8e87d142 724 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 725 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
726
727 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
728 ret = __hci_request(hdev, hci_reset_req, 0,
729 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
730
731done:
1da177e4
LT
732 hci_req_unlock(hdev);
733 hci_dev_put(hdev);
734 return ret;
735}
736
737int hci_dev_reset_stat(__u16 dev)
738{
739 struct hci_dev *hdev;
740 int ret = 0;
741
70f23020
AE
742 hdev = hci_dev_get(dev);
743 if (!hdev)
1da177e4
LT
744 return -ENODEV;
745
746 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
747
748 hci_dev_put(hdev);
749
750 return ret;
751}
752
753int hci_dev_cmd(unsigned int cmd, void __user *arg)
754{
755 struct hci_dev *hdev;
756 struct hci_dev_req dr;
757 int err = 0;
758
759 if (copy_from_user(&dr, arg, sizeof(dr)))
760 return -EFAULT;
761
70f23020
AE
762 hdev = hci_dev_get(dr.dev_id);
763 if (!hdev)
1da177e4
LT
764 return -ENODEV;
765
766 switch (cmd) {
767 case HCISETAUTH:
04837f64
MH
768 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
769 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
770 break;
771
772 case HCISETENCRYPT:
773 if (!lmp_encrypt_capable(hdev)) {
774 err = -EOPNOTSUPP;
775 break;
776 }
777
778 if (!test_bit(HCI_AUTH, &hdev->flags)) {
779 /* Auth must be enabled first */
04837f64
MH
780 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
781 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
782 if (err)
783 break;
784 }
785
04837f64
MH
786 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
787 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
788 break;
789
790 case HCISETSCAN:
04837f64
MH
791 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
792 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
793 break;
794
1da177e4 795 case HCISETLINKPOL:
e4e8e37c
MH
796 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
797 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
798 break;
799
800 case HCISETLINKMODE:
e4e8e37c
MH
801 hdev->link_mode = ((__u16) dr.dev_opt) &
802 (HCI_LM_MASTER | HCI_LM_ACCEPT);
803 break;
804
805 case HCISETPTYPE:
806 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
807 break;
808
809 case HCISETACLMTU:
e4e8e37c
MH
810 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
811 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
812 break;
813
814 case HCISETSCOMTU:
e4e8e37c
MH
815 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
816 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
817 break;
818
819 default:
820 err = -EINVAL;
821 break;
822 }
e4e8e37c 823
1da177e4
LT
824 hci_dev_put(hdev);
825 return err;
826}
827
828int hci_get_dev_list(void __user *arg)
829{
8035ded4 830 struct hci_dev *hdev;
1da177e4
LT
831 struct hci_dev_list_req *dl;
832 struct hci_dev_req *dr;
1da177e4
LT
833 int n = 0, size, err;
834 __u16 dev_num;
835
836 if (get_user(dev_num, (__u16 __user *) arg))
837 return -EFAULT;
838
839 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
840 return -EINVAL;
841
842 size = sizeof(*dl) + dev_num * sizeof(*dr);
843
70f23020
AE
844 dl = kzalloc(size, GFP_KERNEL);
845 if (!dl)
1da177e4
LT
846 return -ENOMEM;
847
848 dr = dl->dev_req;
849
f20d09d5 850 read_lock(&hci_dev_list_lock);
8035ded4 851 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 852 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 853 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
854
855 if (!test_bit(HCI_MGMT, &hdev->flags))
856 set_bit(HCI_PAIRABLE, &hdev->flags);
857
1da177e4
LT
858 (dr + n)->dev_id = hdev->id;
859 (dr + n)->dev_opt = hdev->flags;
c542a06c 860
1da177e4
LT
861 if (++n >= dev_num)
862 break;
863 }
f20d09d5 864 read_unlock(&hci_dev_list_lock);
1da177e4
LT
865
866 dl->dev_num = n;
867 size = sizeof(*dl) + n * sizeof(*dr);
868
869 err = copy_to_user(arg, dl, size);
870 kfree(dl);
871
872 return err ? -EFAULT : 0;
873}
874
875int hci_get_dev_info(void __user *arg)
876{
877 struct hci_dev *hdev;
878 struct hci_dev_info di;
879 int err = 0;
880
881 if (copy_from_user(&di, arg, sizeof(di)))
882 return -EFAULT;
883
70f23020
AE
884 hdev = hci_dev_get(di.dev_id);
885 if (!hdev)
1da177e4
LT
886 return -ENODEV;
887
3243553f
JH
888 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
889 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 890
c542a06c
JH
891 if (!test_bit(HCI_MGMT, &hdev->flags))
892 set_bit(HCI_PAIRABLE, &hdev->flags);
893
1da177e4
LT
894 strcpy(di.name, hdev->name);
895 di.bdaddr = hdev->bdaddr;
943da25d 896 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
897 di.flags = hdev->flags;
898 di.pkt_type = hdev->pkt_type;
899 di.acl_mtu = hdev->acl_mtu;
900 di.acl_pkts = hdev->acl_pkts;
901 di.sco_mtu = hdev->sco_mtu;
902 di.sco_pkts = hdev->sco_pkts;
903 di.link_policy = hdev->link_policy;
904 di.link_mode = hdev->link_mode;
905
906 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
907 memcpy(&di.features, &hdev->features, sizeof(di.features));
908
909 if (copy_to_user(arg, &di, sizeof(di)))
910 err = -EFAULT;
911
912 hci_dev_put(hdev);
913
914 return err;
915}
916
917/* ---- Interface to HCI drivers ---- */
918
611b30f7
MH
919static int hci_rfkill_set_block(void *data, bool blocked)
920{
921 struct hci_dev *hdev = data;
922
923 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
924
925 if (!blocked)
926 return 0;
927
928 hci_dev_do_close(hdev);
929
930 return 0;
931}
932
933static const struct rfkill_ops hci_rfkill_ops = {
934 .set_block = hci_rfkill_set_block,
935};
936
1da177e4
LT
937/* Alloc HCI device */
938struct hci_dev *hci_alloc_dev(void)
939{
940 struct hci_dev *hdev;
941
25ea6db0 942 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
943 if (!hdev)
944 return NULL;
945
0ac7e700 946 hci_init_sysfs(hdev);
1da177e4
LT
947 skb_queue_head_init(&hdev->driver_init);
948
949 return hdev;
950}
951EXPORT_SYMBOL(hci_alloc_dev);
952
953/* Free HCI device */
954void hci_free_dev(struct hci_dev *hdev)
955{
956 skb_queue_purge(&hdev->driver_init);
957
a91f2e39
MH
958 /* will free via device release */
959 put_device(&hdev->dev);
1da177e4
LT
960}
961EXPORT_SYMBOL(hci_free_dev);
962
ab81cbf9
JH
963static void hci_power_on(struct work_struct *work)
964{
965 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
966
967 BT_DBG("%s", hdev->name);
968
969 if (hci_dev_open(hdev->id) < 0)
970 return;
971
972 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
80b7ab33 973 schedule_delayed_work(&hdev->power_off,
3243553f 974 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
975
976 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 977 mgmt_index_added(hdev);
ab81cbf9
JH
978}
979
980static void hci_power_off(struct work_struct *work)
981{
3243553f
JH
982 struct hci_dev *hdev = container_of(work, struct hci_dev,
983 power_off.work);
ab81cbf9
JH
984
985 BT_DBG("%s", hdev->name);
986
987 clear_bit(HCI_AUTO_OFF, &hdev->flags);
988
3243553f 989 hci_dev_close(hdev->id);
ab81cbf9
JH
990}
991
16ab91ab
JH
992static void hci_discov_off(struct work_struct *work)
993{
994 struct hci_dev *hdev;
995 u8 scan = SCAN_PAGE;
996
997 hdev = container_of(work, struct hci_dev, discov_off.work);
998
999 BT_DBG("%s", hdev->name);
1000
09fd0de5 1001 hci_dev_lock(hdev);
16ab91ab
JH
1002
1003 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1004
1005 hdev->discov_timeout = 0;
1006
09fd0de5 1007 hci_dev_unlock(hdev);
16ab91ab
JH
1008}
1009
2aeb9a1a
JH
1010int hci_uuids_clear(struct hci_dev *hdev)
1011{
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->uuids) {
1015 struct bt_uuid *uuid;
1016
1017 uuid = list_entry(p, struct bt_uuid, list);
1018
1019 list_del(p);
1020 kfree(uuid);
1021 }
1022
1023 return 0;
1024}
1025
55ed8ca1
JH
1026int hci_link_keys_clear(struct hci_dev *hdev)
1027{
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->link_keys) {
1031 struct link_key *key;
1032
1033 key = list_entry(p, struct link_key, list);
1034
1035 list_del(p);
1036 kfree(key);
1037 }
1038
1039 return 0;
1040}
1041
1042struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1043{
8035ded4 1044 struct link_key *k;
55ed8ca1 1045
8035ded4 1046 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1047 if (bacmp(bdaddr, &k->bdaddr) == 0)
1048 return k;
55ed8ca1
JH
1049
1050 return NULL;
1051}
1052
d25e28ab
JH
1053static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1054 u8 key_type, u8 old_key_type)
1055{
1056 /* Legacy key */
1057 if (key_type < 0x03)
1058 return 1;
1059
1060 /* Debug keys are insecure so don't store them persistently */
1061 if (key_type == HCI_LK_DEBUG_COMBINATION)
1062 return 0;
1063
1064 /* Changed combination key and there's no previous one */
1065 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1066 return 0;
1067
1068 /* Security mode 3 case */
1069 if (!conn)
1070 return 1;
1071
1072 /* Neither local nor remote side had no-bonding as requirement */
1073 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1074 return 1;
1075
1076 /* Local side had dedicated bonding as requirement */
1077 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1078 return 1;
1079
1080 /* Remote side had dedicated bonding as requirement */
1081 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1082 return 1;
1083
1084 /* If none of the above criteria match, then don't store the key
1085 * persistently */
1086 return 0;
1087}
1088
75d262c2
VCG
1089struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1090{
1091 struct link_key *k;
1092
1093 list_for_each_entry(k, &hdev->link_keys, list) {
1094 struct key_master_id *id;
1095
1096 if (k->type != HCI_LK_SMP_LTK)
1097 continue;
1098
1099 if (k->dlen != sizeof(*id))
1100 continue;
1101
1102 id = (void *) &k->data;
1103 if (id->ediv == ediv &&
1104 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1105 return k;
1106 }
1107
1108 return NULL;
1109}
1110EXPORT_SYMBOL(hci_find_ltk);
1111
1112struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1113 bdaddr_t *bdaddr, u8 type)
1114{
1115 struct link_key *k;
1116
1117 list_for_each_entry(k, &hdev->link_keys, list)
1118 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1119 return k;
1120
1121 return NULL;
1122}
1123EXPORT_SYMBOL(hci_find_link_key_type);
1124
/* Store (or update) a BR/EDR link key for bdaddr.
 * @new_key: non-zero when the controller just generated this key (as
 *           opposed to re-loading a stored one) - triggers a mgmt event.
 * @conn:    may be NULL; used for legacy-pairing heuristics below.
 * Returns 0 on success or -ENOMEM when a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);	/* link keys are always 16 bytes */
	key->pin_len = pin_len;

	/* A "changed combination" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are reported to userspace but not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1179
/* Store (or update) an SMP Long Term Key for bdaddr.
 * The LTK is kept as a link_key of type HCI_LK_SMP_LTK whose trailing
 * data holds a struct key_master_id (EDIV + 8-byte rand).
 * @new_key: non-zero when the key was just distributed - notify mgmt.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Replace an existing LTK for this address in place */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate entry plus room for the trailing key_master_id */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;	/* no previous key */
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* pin_len doubles as encryption key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1217
55ed8ca1
JH
1218int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1219{
1220 struct link_key *key;
1221
1222 key = hci_find_link_key(hdev, bdaddr);
1223 if (!key)
1224 return -ENOENT;
1225
1226 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1227
1228 list_del(&key->list);
1229 kfree(key);
1230
1231 return 0;
1232}
1233
/* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log the
 * timeout, force the command credit back to 1 and kick the command
 * work so the queue does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Assume the lost command consumed our only credit; restore it */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1243
2763eda6
SJ
1244struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1245 bdaddr_t *bdaddr)
1246{
1247 struct oob_data *data;
1248
1249 list_for_each_entry(data, &hdev->remote_oob_data, list)
1250 if (bacmp(bdaddr, &data->bdaddr) == 0)
1251 return data;
1252
1253 return NULL;
1254}
1255
1256int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1257{
1258 struct oob_data *data;
1259
1260 data = hci_find_remote_oob_data(hdev, bdaddr);
1261 if (!data)
1262 return -ENOENT;
1263
1264 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1265
1266 list_del(&data->list);
1267 kfree(data);
1268
1269 return 0;
1270}
1271
1272int hci_remote_oob_data_clear(struct hci_dev *hdev)
1273{
1274 struct oob_data *data, *n;
1275
1276 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1277 list_del(&data->list);
1278 kfree(data);
1279 }
1280
1281 return 0;
1282}
1283
/* Store (or refresh) the remote OOB pairing data (hash + randomizer)
 * for bdaddr, allocating a new entry if none exists yet.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: every field is overwritten below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1307
b2a66aad
AJ
1308struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1309 bdaddr_t *bdaddr)
1310{
8035ded4 1311 struct bdaddr_list *b;
b2a66aad 1312
8035ded4 1313 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1314 if (bacmp(bdaddr, &b->bdaddr) == 0)
1315 return b;
b2a66aad
AJ
1316
1317 return NULL;
1318}
1319
1320int hci_blacklist_clear(struct hci_dev *hdev)
1321{
1322 struct list_head *p, *n;
1323
1324 list_for_each_safe(p, n, &hdev->blacklist) {
1325 struct bdaddr_list *b;
1326
1327 b = list_entry(p, struct bdaddr_list, list);
1328
1329 list_del(p);
1330 kfree(b);
1331 }
1332
1333 return 0;
1334}
1335
/* Add bdaddr to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt_device_blocked()
 * result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1356
/* Remove bdaddr from the device blacklist and notify mgmt.
 * BDADDR_ANY clears the whole list.  Returns -ENOENT when the address
 * is not blacklisted, otherwise the mgmt_device_unblocked() result
 * (or hci_blacklist_clear()'s 0 for the wildcard case).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* Wildcard address == "unblock everything" */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1373
/* Delayed-work handler (hdev->adv_work) that empties the LE
 * advertising cache under the device lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1385
76c8686f
AG
1386int hci_adv_entries_clear(struct hci_dev *hdev)
1387{
1388 struct adv_entry *entry, *tmp;
1389
1390 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1391 list_del(&entry->list);
1392 kfree(entry);
1393 }
1394
1395 BT_DBG("%s adv cache cleared", hdev->name);
1396
1397 return 0;
1398}
1399
1400struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1401{
1402 struct adv_entry *entry;
1403
1404 list_for_each_entry(entry, &hdev->adv_entries, list)
1405 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1406 return entry;
1407
1408 return NULL;
1409}
1410
1411static inline int is_connectable_adv(u8 evt_type)
1412{
1413 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1414 return 1;
1415
1416 return 0;
1417}
1418
/* Cache a connectable LE advertiser seen in an advertising report.
 * Returns -EINVAL for non-connectable PDU types, 0 when the address is
 * already cached or on success, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1446
/* Register HCI device */
/* Allocate an index, initialize all per-device state (queues, timers,
 * work items, lists, workqueue, sysfs, rfkill) and announce the new
 * controller.  Returns the assigned id (>= 0) or a negative errno.
 * The init order matters: the device is put on hci_dev_list first and
 * removed again on the error paths.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A driver must supply these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for unanswered HCI commands (see hci_cmd_timer) */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is non-fatal: run without an rfkill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1574
/* Unregister HCI device */
/* Tear down a registered controller: unlink it, close it, free pending
 * reassembly buffers, notify mgmt/sysfs/rfkill, flush delayed work and
 * release all per-device lists before dropping the final reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv-cache clearer is not running before freeing */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1626
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1634
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1642
/* Receive frame from HCI drivers */
/* Entry point for complete frames coming up from a transport driver.
 * The owning hci_dev travels in skb->dev.  Frames are only accepted
 * while the device is up or initializing; otherwise the skb is freed
 * and -ENXIO returned.  Accepted frames are timestamped and handed to
 * the rx work for processing in process context.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1665
/* Incrementally reassemble one HCI packet of the given type from a
 * driver-supplied byte stream into hdev->reassembly[index].
 * Copies up to scb->expect bytes per pass; once the packet header is
 * complete the expected payload length is read from it.  A finished
 * packet is handed to hci_recv_frame().
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for a
 * bad type/index, or -ENOMEM on allocation/oversized-payload failure.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst case
		 * of this packet type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the payload length and
		 * reject payloads larger than the allocated skb */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1774
/* Feed a driver buffer containing fragments of packets of a known type
 * into the per-type reassembly slot (index type - 1).  Loops until the
 * whole buffer is consumed or an error occurs.
 * Returns the final leftover count (0 when fully consumed) or a
 * negative error from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what was consumed this round */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1794
/* Reassembly slot for byte-stream transports (e.g. UART-style drivers
 * that prefix every packet with a one-byte type indicator). */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the stream reassembly slot.  At a packet
 * boundary the first byte is the HCI packet type; mid-packet the type
 * is recovered from the in-progress skb.
 * Returns leftover byte count or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1829
/* ---- Interface to upper protocols ---- */

/* Add an upper-protocol callback block to the global hci_cb_list,
 * protected by hci_cb_list_lock.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1843
/* Remove an upper-protocol callback block from the global hci_cb_list,
 * protected by hci_cb_list_lock.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1855
/* Hand one fully-built frame to the transport driver (hdev->send).
 * The destination device travels in skb->dev.  In promiscuous mode a
 * copy is first delivered to monitoring sockets.
 * Returns the driver's result, or -ENODEV when skb->dev is unset
 * (the skb is freed in that case).
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1879
/* Send HCI command */
/* Build an HCI command packet (header + optional parameters) and queue
 * it on cmd_q for the command work to transmit.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* wire format is little endian */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
1915
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last transmitted
 * command, or NULL when nothing was sent or the opcode differs from
 * what the caller expects.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Compare in wire (little-endian) byte order */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1933
/* Send ACL data */
/* Prepend an ACL data header (handle+flags, payload length) to the skb
 * and mark the header position via the transport header offset.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before pushing the header */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1946
/* Queue an ACL skb (possibly carrying a frag_list of continuation
 * fragments) on the given queue.  Fragments after the first get
 * ACL_CONT flags and their own ACL headers; the whole chain is queued
 * atomically under the queue lock so the scheduler never sees a
 * partially queued packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each piece is queued separately */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
1987
/* Queue ACL data on a channel for transmission: stamp the packet type,
 * add the ACL header for the first fragment, hand the skb (and any
 * fragment chain) to hci_queue_acl() and kick the TX work.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2004
/* Send SCO data */
/* Build the SCO header in a local struct, copy it in front of the
 * payload, queue the packet on the connection's data queue and kick
 * the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;	/* single byte, no endian conversion */

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2027
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (fair round-robin by c->sent), and compute
 * its quota as available controller buffers divided by the number of
 * eligible connections (minimum 1).  Returns the chosen connection or
 * NULL with *quote = 0.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	/* NOTE(review): the list walk itself is RCU-protected below */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-serviced eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on link type; LE may share ACL buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2089
/* Link TX watchdog: called when packets of the given type have been
 * outstanding too long.  Disconnects every connection of that type
 * with unacknowledged packets (reason 0x13: remote user terminated).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2110
/* Channel-level scheduler: among all channels on connections of the
 * given type, pick one whose head skb has the highest priority,
 * breaking ties by the fewest packets in flight on its connection.
 * The quota is available buffers divided by the number of channels at
 * that top priority (minimum 1).  Returns NULL when nothing is queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Only the current highest priority competes */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New top priority: restart the tally */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2189
/* Anti-starvation pass run after a scheduling round: for every channel
 * of the given type that was NOT serviced (chan->sent == 0) but still
 * has queued data, promote its head skb to priority HCI_PRIO_MAX - 1
 * so it wins the next round.  Serviced channels just get their per-
 * round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				/* Was serviced; reset its round counter */
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2239
/* ACL scheduler: detect TX stalls, then repeatedly pick the best
 * channel via hci_chan_sent() and send up to its quota of same-or-
 * higher-priority packets while controller ACL buffers remain.
 * Runs a priority recalculation pass if anything was sent.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;	/* remember to detect whether we sent anything */

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2289
/* Schedule SCO */
/* Round-robin SCO transmission: while SCO buffers remain, drain up to
 * the fair quota from the least-serviced SCO connection.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)	/* avoid counter wrap */
				conn->sent = 0;
		}
	}
}
2313
/* eSCO scheduler - same round-robin strategy as hci_sched_sco(), but
 * for ESCO_LINK connections (eSCO shares the SCO buffer count).
 */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)	/* avoid counter wrap */
				conn->sent = 0;
		}
	}
}
2336
/* LE scheduler: like hci_sched_acl() but for LE links.  Controllers
 * without a dedicated LE buffer pool (le_pkts == 0) borrow the ACL
 * pool, so the consumed count is written back to the right counter at
 * the end.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if present, otherwise shared ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to whichever pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2387
3eff45ea 2388static void hci_tx_work(struct work_struct *work)
1da177e4 2389{
3eff45ea 2390 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2391 struct sk_buff *skb;
2392
6ed58ec5
VT
2393 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2394 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2395
2396 /* Schedule queues and send stuff to HCI driver */
2397
2398 hci_sched_acl(hdev);
2399
2400 hci_sched_sco(hdev);
2401
b6a0dc82
MH
2402 hci_sched_esco(hdev);
2403
6ed58ec5
VT
2404 hci_sched_le(hdev);
2405
1da177e4
LT
2406 /* Send next queued raw (unknown type) packet */
2407 while ((skb = skb_dequeue(&hdev->raw_q)))
2408 hci_send_frame(skb);
1da177e4
LT
2409}
2410
25985edc 2411/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2412
2413/* ACL data packet */
2414static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2415{
2416 struct hci_acl_hdr *hdr = (void *) skb->data;
2417 struct hci_conn *conn;
2418 __u16 handle, flags;
2419
2420 skb_pull(skb, HCI_ACL_HDR_SIZE);
2421
2422 handle = __le16_to_cpu(hdr->handle);
2423 flags = hci_flags(handle);
2424 handle = hci_handle(handle);
2425
2426 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2427
2428 hdev->stat.acl_rx++;
2429
2430 hci_dev_lock(hdev);
2431 conn = hci_conn_hash_lookup_handle(hdev, handle);
2432 hci_dev_unlock(hdev);
8e87d142 2433
1da177e4 2434 if (conn) {
65983fc7 2435 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2436
1da177e4 2437 /* Send to upper protocol */
686ebf28
UF
2438 l2cap_recv_acldata(conn, skb, flags);
2439 return;
1da177e4 2440 } else {
8e87d142 2441 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2442 hdev->name, handle);
2443 }
2444
2445 kfree_skb(skb);
2446}
2447
2448/* SCO data packet */
2449static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2450{
2451 struct hci_sco_hdr *hdr = (void *) skb->data;
2452 struct hci_conn *conn;
2453 __u16 handle;
2454
2455 skb_pull(skb, HCI_SCO_HDR_SIZE);
2456
2457 handle = __le16_to_cpu(hdr->handle);
2458
2459 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2460
2461 hdev->stat.sco_rx++;
2462
2463 hci_dev_lock(hdev);
2464 conn = hci_conn_hash_lookup_handle(hdev, handle);
2465 hci_dev_unlock(hdev);
2466
2467 if (conn) {
1da177e4 2468 /* Send to upper protocol */
686ebf28
UF
2469 sco_recv_scodata(conn, skb);
2470 return;
1da177e4 2471 } else {
8e87d142 2472 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2473 hdev->name, handle);
2474 }
2475
2476 kfree_skb(skb);
2477}
2478
b78752cc 2479static void hci_rx_work(struct work_struct *work)
1da177e4 2480{
b78752cc 2481 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2482 struct sk_buff *skb;
2483
2484 BT_DBG("%s", hdev->name);
2485
1da177e4
LT
2486 while ((skb = skb_dequeue(&hdev->rx_q))) {
2487 if (atomic_read(&hdev->promisc)) {
2488 /* Send copy to the sockets */
eec8d2bc 2489 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2490 }
2491
2492 if (test_bit(HCI_RAW, &hdev->flags)) {
2493 kfree_skb(skb);
2494 continue;
2495 }
2496
2497 if (test_bit(HCI_INIT, &hdev->flags)) {
2498 /* Don't process data packets in this states. */
0d48d939 2499 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2500 case HCI_ACLDATA_PKT:
2501 case HCI_SCODATA_PKT:
2502 kfree_skb(skb);
2503 continue;
3ff50b79 2504 }
1da177e4
LT
2505 }
2506
2507 /* Process frame */
0d48d939 2508 switch (bt_cb(skb)->pkt_type) {
1da177e4 2509 case HCI_EVENT_PKT:
b78752cc 2510 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2511 hci_event_packet(hdev, skb);
2512 break;
2513
2514 case HCI_ACLDATA_PKT:
2515 BT_DBG("%s ACL data packet", hdev->name);
2516 hci_acldata_packet(hdev, skb);
2517 break;
2518
2519 case HCI_SCODATA_PKT:
2520 BT_DBG("%s SCO data packet", hdev->name);
2521 hci_scodata_packet(hdev, skb);
2522 break;
2523
2524 default:
2525 kfree_skb(skb);
2526 break;
2527 }
2528 }
1da177e4
LT
2529}
2530
/* Command work handler: send the next queued HCI command, if the
 * controller has command credits available.
 *
 * A clone of the command is kept in hdev->sent_cmd so the completion
 * handler can match replies; the command timer guards against a
 * controller that never answers.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Clone first: the original skb is consumed by
		 * hci_send_frame() below. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During a reset no reply is expected, so disarm
			 * the timeout; otherwise (re)arm it. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue at the head and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2561
2562int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2563{
2564 /* General inquiry access code (GIAC) */
2565 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2566 struct hci_cp_inquiry cp;
2567
2568 BT_DBG("%s", hdev->name);
2569
2570 if (test_bit(HCI_INQUIRY, &hdev->flags))
2571 return -EINPROGRESS;
2572
2573 memset(&cp, 0, sizeof(cp));
2574 memcpy(&cp.lap, lap, sizeof(cp.lap));
2575 cp.length = length;
2576
2577 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2578}
023d5049
AG
2579
2580int hci_cancel_inquiry(struct hci_dev *hdev)
2581{
2582 BT_DBG("%s", hdev->name);
2583
2584 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2585 return -EPERM;
2586
2587 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2588}
7784d78f
AE
2589
2590module_param(enable_hs, bool, 0644);
2591MODULE_PARM_DESC(enable_hs, "Enable High Speed");