Bluetooth: mgmt: Fix pairable setting upon initialization
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_rx_work(struct work_struct *work);
58 static void hci_cmd_work(struct work_struct *work);
59 static void hci_tx_work(struct work_struct *work);
60
61 /* HCI device list */
62 LIST_HEAD(hci_dev_list);
63 DEFINE_RWLOCK(hci_dev_list_lock);
64
65 /* HCI callback list */
66 LIST_HEAD(hci_cb_list);
67 DEFINE_RWLOCK(hci_cb_list_lock);
68
69 /* ---- HCI notifications ---- */
70
71 static void hci_notify(struct hci_dev *hdev, int event)
72 {
73 hci_sock_dev_event(hdev, event);
74 }
75
76 /* ---- HCI requests ---- */
77
78 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
79 {
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
82 	/* If this is the init phase, check whether the completed command matches
83 	 * the last init command; if not, just return.
84 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
86 return;
87
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93 }
94
95 static void hci_req_cancel(struct hci_dev *hdev, int err)
96 {
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104 }
105
106 /* Execute request and wait for completion. */
107 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
108 unsigned long opt, __u32 timeout)
109 {
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
130 err = -bt_to_errno(hdev->req_result);
131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
140 }
141
142 hdev->req_status = hdev->req_result = 0;
143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147 }
148
149 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
150 unsigned long opt, __u32 timeout)
151 {
152 int ret;
153
154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163 }
164
165 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166 {
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
170 set_bit(HCI_RESET, &hdev->flags);
171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
172 }
173
174 static void bredr_init(struct hci_dev *hdev)
175 {
176 struct hci_cp_delete_stored_link_key cp;
177 __le16 param;
178 __u8 flt_type;
179
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
182 /* Mandatory initialization */
183
184 /* Reset */
185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189
190 /* Read Local Supported Features */
191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
192
193 /* Read Local Version */
194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
195
196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
198
199 /* Read BD Address */
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
207
208 /* Read Voice Setting */
209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
214 flt_type = HCI_FLT_CLEAR_ALL;
215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
216
217 /* Connection accept timeout ~20 secs */
218 param = cpu_to_le16(0x7d00);
219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
224 }
225
226 static void amp_init(struct hci_dev *hdev)
227 {
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235 }
236
237 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238 {
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269 }
270
271 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272 {
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277 }
278
279 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280 {
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
287 }
288
289 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290 {
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
297 }
298
299 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300 {
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
305 /* Encryption */
306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
307 }
308
309 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310 {
311 __le16 policy = cpu_to_le16(opt);
312
313 BT_DBG("%s %x", hdev->name, policy);
314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317 }
318
319 /* Get HCI device by index.
320 * Device is held on return. */
321 struct hci_dev *hci_dev_get(int index)
322 {
323 struct hci_dev *hdev = NULL, *d;
324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
331 list_for_each_entry(d, &hci_dev_list, list) {
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339 }
340
341 /* ---- Inquiry support ---- */
342
343 bool hci_discovery_active(struct hci_dev *hdev)
344 {
345 struct discovery_state *discov = &hdev->discovery;
346
347 switch (discov->state) {
348 case DISCOVERY_FINDING:
349 case DISCOVERY_RESOLVING:
350 return true;
351
352 default:
353 return false;
354 }
355 }
356
357 void hci_discovery_set_state(struct hci_dev *hdev, int state)
358 {
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
368 hdev->discovery.type = 0;
369 break;
370 case DISCOVERY_STARTING:
371 break;
372 case DISCOVERY_FINDING:
373 mgmt_discovering(hdev, 1);
374 break;
375 case DISCOVERY_RESOLVING:
376 break;
377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382 }
383
384 static void inquiry_cache_flush(struct hci_dev *hdev)
385 {
386 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *p, *n;
388
389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
391 kfree(p);
392 }
393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
396 cache->state = DISCOVERY_STOPPED;
397 }
398
399 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400 {
401 struct discovery_state *cache = &hdev->discovery;
402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
408 return e;
409 }
410
411 return NULL;
412 }
413
414 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416 {
417 struct discovery_state *cache = &hdev->discovery;
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
424 return e;
425 }
426
427 return NULL;
428 }
429
430 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433 {
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447 }
448
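/* Keep the resolve list ordered by signal strength (smallest |RSSI|,
 * i.e. strongest signal, first) so names are resolved for the closest
 * devices first; entries already pending name resolution are skipped
 * when choosing the insertion point. */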
449 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451 {
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466 }
467
468 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
469 bool name_known)
470 {
471 struct discovery_state *cache = &hdev->discovery;
472 struct inquiry_entry *ie;
473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
476 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
477 if (ie) {
478 if (ie->name_state == NAME_NEEDED &&
479 data->rssi != ie->data.rssi) {
480 ie->data.rssi = data->rssi;
481 hci_inquiry_cache_update_resolve(hdev, ie);
482 }
483
484 goto update;
485 }
486
487 /* Entry not in the cache. Add new one. */
488 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
489 if (!ie)
490 return false;
491
492 list_add(&ie->all, &cache->all);
493
494 if (name_known) {
495 ie->name_state = NAME_KNOWN;
496 } else {
497 ie->name_state = NAME_NOT_KNOWN;
498 list_add(&ie->list, &cache->unknown);
499 }
500
501 update:
502 if (name_known && ie->name_state != NAME_KNOWN &&
503 ie->name_state != NAME_PENDING) {
504 ie->name_state = NAME_KNOWN;
505 list_del(&ie->list);
506 }
507
508 memcpy(&ie->data, data, sizeof(*data));
509 ie->timestamp = jiffies;
510 cache->timestamp = jiffies;
511
512 if (ie->name_state == NAME_NOT_KNOWN)
513 return false;
514
515 return true;
516 }
517
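/* Copy up to @num cached inquiry results into @buf as struct
 * inquiry_info records and return how many were copied. */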
518 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
519 {
520 struct discovery_state *cache = &hdev->discovery;
521 struct inquiry_info *info = (struct inquiry_info *) buf;
522 struct inquiry_entry *e;
523 int copied = 0;
524
525 list_for_each_entry(e, &cache->all, all) {
526 struct inquiry_data *data = &e->data;
527
528 if (copied >= num)
529 break;
530
531 bacpy(&info->bdaddr, &data->bdaddr);
532 info->pscan_rep_mode = data->pscan_rep_mode;
533 info->pscan_period_mode = data->pscan_period_mode;
534 info->pscan_mode = data->pscan_mode;
535 memcpy(info->dev_class, data->dev_class, 3);
536 info->clock_offset = data->clock_offset;
537
538 info++;
539 copied++;
540 }
541
542 BT_DBG("cache %p, copied %d", cache, copied);
543 return copied;
544 }
545
546 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
547 {
548 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
549 struct hci_cp_inquiry cp;
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_INQUIRY, &hdev->flags))
554 return;
555
556 /* Start Inquiry */
557 memcpy(&cp.lap, &ir->lap, 3);
558 cp.length = ir->length;
559 cp.num_rsp = ir->num_rsp;
560 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
561 }
562
563 int hci_inquiry(void __user *arg)
564 {
565 __u8 __user *ptr = arg;
566 struct hci_inquiry_req ir;
567 struct hci_dev *hdev;
568 int err = 0, do_inquiry = 0, max_rsp;
569 long timeo;
570 __u8 *buf;
571
572 if (copy_from_user(&ir, ptr, sizeof(ir)))
573 return -EFAULT;
574
575 hdev = hci_dev_get(ir.dev_id);
576 if (!hdev)
577 return -ENODEV;
578
579 hci_dev_lock(hdev);
580 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
581 inquiry_cache_empty(hdev) ||
582 ir.flags & IREQ_CACHE_FLUSH) {
583 inquiry_cache_flush(hdev);
584 do_inquiry = 1;
585 }
586 hci_dev_unlock(hdev);
587
588 timeo = ir.length * msecs_to_jiffies(2000);
589
590 if (do_inquiry) {
591 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
592 if (err < 0)
593 goto done;
594 }
595
596 	/* For an unlimited number of responses we use a buffer with 255 entries */
597 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
598
599 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
600 	 * copy it to user space.
601 */
602 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
603 if (!buf) {
604 err = -ENOMEM;
605 goto done;
606 }
607
608 hci_dev_lock(hdev);
609 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
610 hci_dev_unlock(hdev);
611
612 BT_DBG("num_rsp %d", ir.num_rsp);
613
614 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
615 ptr += sizeof(ir);
616 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
617 ir.num_rsp))
618 err = -EFAULT;
619 } else
620 err = -EFAULT;
621
622 kfree(buf);
623
624 done:
625 hci_dev_put(hdev);
626 return err;
627 }
628
629 /* ---- HCI ioctl helpers ---- */
630
631 int hci_dev_open(__u16 dev)
632 {
633 struct hci_dev *hdev;
634 int ret = 0;
635
636 hdev = hci_dev_get(dev);
637 if (!hdev)
638 return -ENODEV;
639
640 BT_DBG("%s %p", hdev->name, hdev);
641
642 hci_req_lock(hdev);
643
644 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
645 ret = -ERFKILL;
646 goto done;
647 }
648
649 if (test_bit(HCI_UP, &hdev->flags)) {
650 ret = -EALREADY;
651 goto done;
652 }
653
654 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
655 set_bit(HCI_RAW, &hdev->flags);
656
657 	/* Treat all non-BR/EDR controllers as raw devices if
658 	   enable_hs is not set */
659 if (hdev->dev_type != HCI_BREDR && !enable_hs)
660 set_bit(HCI_RAW, &hdev->flags);
661
662 if (hdev->open(hdev)) {
663 ret = -EIO;
664 goto done;
665 }
666
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 atomic_set(&hdev->cmd_cnt, 1);
669 set_bit(HCI_INIT, &hdev->flags);
670 hdev->init_last_cmd = 0;
671
672 ret = __hci_request(hdev, hci_init_req, 0,
673 msecs_to_jiffies(HCI_INIT_TIMEOUT));
674
675 if (lmp_host_le_capable(hdev))
676 ret = __hci_request(hdev, hci_le_init_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
679 clear_bit(HCI_INIT, &hdev->flags);
680 }
681
682 if (!ret) {
683 hci_dev_hold(hdev);
684 set_bit(HCI_UP, &hdev->flags);
685 hci_notify(hdev, HCI_DEV_UP);
686 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
687 hci_dev_lock(hdev);
688 mgmt_powered(hdev, 1);
689 hci_dev_unlock(hdev);
690 }
691 } else {
692 /* Init failed, cleanup */
693 flush_work(&hdev->tx_work);
694 flush_work(&hdev->cmd_work);
695 flush_work(&hdev->rx_work);
696
697 skb_queue_purge(&hdev->cmd_q);
698 skb_queue_purge(&hdev->rx_q);
699
700 if (hdev->flush)
701 hdev->flush(hdev);
702
703 if (hdev->sent_cmd) {
704 kfree_skb(hdev->sent_cmd);
705 hdev->sent_cmd = NULL;
706 }
707
708 hdev->close(hdev);
709 hdev->flags = 0;
710 }
711
712 done:
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716 }
717
718 static int hci_dev_do_close(struct hci_dev *hdev)
719 {
720 BT_DBG("%s %p", hdev->name, hdev);
721
722 cancel_work_sync(&hdev->le_scan);
723
724 hci_req_cancel(hdev, ENODEV);
725 hci_req_lock(hdev);
726
727 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
728 del_timer_sync(&hdev->cmd_timer);
729 hci_req_unlock(hdev);
730 return 0;
731 }
732
733 /* Flush RX and TX works */
734 flush_work(&hdev->tx_work);
735 flush_work(&hdev->rx_work);
736
737 if (hdev->discov_timeout > 0) {
738 cancel_delayed_work(&hdev->discov_off);
739 hdev->discov_timeout = 0;
740 }
741
742 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
743 cancel_delayed_work(&hdev->service_cache);
744
745 cancel_delayed_work_sync(&hdev->le_scan_disable);
746
747 hci_dev_lock(hdev);
748 inquiry_cache_flush(hdev);
749 hci_conn_hash_flush(hdev);
750 hci_dev_unlock(hdev);
751
752 hci_notify(hdev, HCI_DEV_DOWN);
753
754 if (hdev->flush)
755 hdev->flush(hdev);
756
757 /* Reset device */
758 skb_queue_purge(&hdev->cmd_q);
759 atomic_set(&hdev->cmd_cnt, 1);
760 if (!test_bit(HCI_RAW, &hdev->flags) &&
761 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
762 set_bit(HCI_INIT, &hdev->flags);
763 __hci_request(hdev, hci_reset_req, 0,
764 msecs_to_jiffies(250));
765 clear_bit(HCI_INIT, &hdev->flags);
766 }
767
768 /* flush cmd work */
769 flush_work(&hdev->cmd_work);
770
771 /* Drop queues */
772 skb_queue_purge(&hdev->rx_q);
773 skb_queue_purge(&hdev->cmd_q);
774 skb_queue_purge(&hdev->raw_q);
775
776 /* Drop last sent command */
777 if (hdev->sent_cmd) {
778 del_timer_sync(&hdev->cmd_timer);
779 kfree_skb(hdev->sent_cmd);
780 hdev->sent_cmd = NULL;
781 }
782
783 /* After this point our queues are empty
784 * and no tasks are scheduled. */
785 hdev->close(hdev);
786
787 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
788 hci_dev_lock(hdev);
789 mgmt_powered(hdev, 0);
790 hci_dev_unlock(hdev);
791 }
792
793 /* Clear flags */
794 hdev->flags = 0;
795
796 hci_req_unlock(hdev);
797
798 hci_dev_put(hdev);
799 return 0;
800 }
801
802 int hci_dev_close(__u16 dev)
803 {
804 struct hci_dev *hdev;
805 int err;
806
807 hdev = hci_dev_get(dev);
808 if (!hdev)
809 return -ENODEV;
810
811 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
812 cancel_delayed_work(&hdev->power_off);
813
814 err = hci_dev_do_close(hdev);
815
816 hci_dev_put(hdev);
817 return err;
818 }
819
820 int hci_dev_reset(__u16 dev)
821 {
822 struct hci_dev *hdev;
823 int ret = 0;
824
825 hdev = hci_dev_get(dev);
826 if (!hdev)
827 return -ENODEV;
828
829 hci_req_lock(hdev);
830
831 if (!test_bit(HCI_UP, &hdev->flags))
832 goto done;
833
834 /* Drop queues */
835 skb_queue_purge(&hdev->rx_q);
836 skb_queue_purge(&hdev->cmd_q);
837
838 hci_dev_lock(hdev);
839 inquiry_cache_flush(hdev);
840 hci_conn_hash_flush(hdev);
841 hci_dev_unlock(hdev);
842
843 if (hdev->flush)
844 hdev->flush(hdev);
845
846 atomic_set(&hdev->cmd_cnt, 1);
847 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
848
849 if (!test_bit(HCI_RAW, &hdev->flags))
850 ret = __hci_request(hdev, hci_reset_req, 0,
851 msecs_to_jiffies(HCI_INIT_TIMEOUT));
852
853 done:
854 hci_req_unlock(hdev);
855 hci_dev_put(hdev);
856 return ret;
857 }
858
859 int hci_dev_reset_stat(__u16 dev)
860 {
861 struct hci_dev *hdev;
862 int ret = 0;
863
864 hdev = hci_dev_get(dev);
865 if (!hdev)
866 return -ENODEV;
867
868 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
869
870 hci_dev_put(hdev);
871
872 return ret;
873 }
874
875 int hci_dev_cmd(unsigned int cmd, void __user *arg)
876 {
877 struct hci_dev *hdev;
878 struct hci_dev_req dr;
879 int err = 0;
880
881 if (copy_from_user(&dr, arg, sizeof(dr)))
882 return -EFAULT;
883
884 hdev = hci_dev_get(dr.dev_id);
885 if (!hdev)
886 return -ENODEV;
887
888 switch (cmd) {
889 case HCISETAUTH:
890 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
891 msecs_to_jiffies(HCI_INIT_TIMEOUT));
892 break;
893
894 case HCISETENCRYPT:
895 if (!lmp_encrypt_capable(hdev)) {
896 err = -EOPNOTSUPP;
897 break;
898 }
899
900 if (!test_bit(HCI_AUTH, &hdev->flags)) {
901 /* Auth must be enabled first */
902 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
903 msecs_to_jiffies(HCI_INIT_TIMEOUT));
904 if (err)
905 break;
906 }
907
908 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
909 msecs_to_jiffies(HCI_INIT_TIMEOUT));
910 break;
911
912 case HCISETSCAN:
913 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
914 msecs_to_jiffies(HCI_INIT_TIMEOUT));
915 break;
916
917 case HCISETLINKPOL:
918 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
920 break;
921
922 case HCISETLINKMODE:
923 hdev->link_mode = ((__u16) dr.dev_opt) &
924 (HCI_LM_MASTER | HCI_LM_ACCEPT);
925 break;
926
927 case HCISETPTYPE:
928 hdev->pkt_type = (__u16) dr.dev_opt;
929 break;
930
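	/* For the MTU ioctls dev_opt packs two 16-bit values:
	 * word 1 is the MTU, word 0 the packet count. */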
931 case HCISETACLMTU:
932 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
933 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
934 break;
935
936 case HCISETSCOMTU:
937 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
938 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
939 break;
940
941 default:
942 err = -EINVAL;
943 break;
944 }
945
946 hci_dev_put(hdev);
947 return err;
948 }
949
950 int hci_get_dev_list(void __user *arg)
951 {
952 struct hci_dev *hdev;
953 struct hci_dev_list_req *dl;
954 struct hci_dev_req *dr;
955 int n = 0, size, err;
956 __u16 dev_num;
957
958 if (get_user(dev_num, (__u16 __user *) arg))
959 return -EFAULT;
960
961 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
962 return -EINVAL;
963
964 size = sizeof(*dl) + dev_num * sizeof(*dr);
965
966 dl = kzalloc(size, GFP_KERNEL);
967 if (!dl)
968 return -ENOMEM;
969
970 dr = dl->dev_req;
971
972 read_lock(&hci_dev_list_lock);
973 list_for_each_entry(hdev, &hci_dev_list, list) {
974 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
975 cancel_delayed_work(&hdev->power_off);
976
977 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
978 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
979
980 (dr + n)->dev_id = hdev->id;
981 (dr + n)->dev_opt = hdev->flags;
982
983 if (++n >= dev_num)
984 break;
985 }
986 read_unlock(&hci_dev_list_lock);
987
988 dl->dev_num = n;
989 size = sizeof(*dl) + n * sizeof(*dr);
990
991 err = copy_to_user(arg, dl, size);
992 kfree(dl);
993
994 return err ? -EFAULT : 0;
995 }
996
997 int hci_get_dev_info(void __user *arg)
998 {
999 struct hci_dev *hdev;
1000 struct hci_dev_info di;
1001 int err = 0;
1002
1003 if (copy_from_user(&di, arg, sizeof(di)))
1004 return -EFAULT;
1005
1006 hdev = hci_dev_get(di.dev_id);
1007 if (!hdev)
1008 return -ENODEV;
1009
1010 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1011 cancel_delayed_work_sync(&hdev->power_off);
1012
1013 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1014 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1015
1016 strcpy(di.name, hdev->name);
1017 di.bdaddr = hdev->bdaddr;
1018 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1019 di.flags = hdev->flags;
1020 di.pkt_type = hdev->pkt_type;
1021 di.acl_mtu = hdev->acl_mtu;
1022 di.acl_pkts = hdev->acl_pkts;
1023 di.sco_mtu = hdev->sco_mtu;
1024 di.sco_pkts = hdev->sco_pkts;
1025 di.link_policy = hdev->link_policy;
1026 di.link_mode = hdev->link_mode;
1027
1028 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1029 memcpy(&di.features, &hdev->features, sizeof(di.features));
1030
1031 if (copy_to_user(arg, &di, sizeof(di)))
1032 err = -EFAULT;
1033
1034 hci_dev_put(hdev);
1035
1036 return err;
1037 }
1038
1039 /* ---- Interface to HCI drivers ---- */
1040
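/* rfkill callback: bring the device down when the switch blocks the
 * radio; unblocking is a no-op here. */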
1041 static int hci_rfkill_set_block(void *data, bool blocked)
1042 {
1043 struct hci_dev *hdev = data;
1044
1045 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1046
1047 if (!blocked)
1048 return 0;
1049
1050 hci_dev_do_close(hdev);
1051
1052 return 0;
1053 }
1054
1055 static const struct rfkill_ops hci_rfkill_ops = {
1056 .set_block = hci_rfkill_set_block,
1057 };
1058
1059 /* Alloc HCI device */
1060 struct hci_dev *hci_alloc_dev(void)
1061 {
1062 struct hci_dev *hdev;
1063
1064 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1065 if (!hdev)
1066 return NULL;
1067
1068 hci_init_sysfs(hdev);
1069 skb_queue_head_init(&hdev->driver_init);
1070
1071 return hdev;
1072 }
1073 EXPORT_SYMBOL(hci_alloc_dev);
1074
1075 /* Free HCI device */
1076 void hci_free_dev(struct hci_dev *hdev)
1077 {
1078 skb_queue_purge(&hdev->driver_init);
1079
1080 /* will free via device release */
1081 put_device(&hdev->dev);
1082 }
1083 EXPORT_SYMBOL(hci_free_dev);
1084
1085 static void hci_power_on(struct work_struct *work)
1086 {
1087 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1088
1089 BT_DBG("%s", hdev->name);
1090
1091 if (hci_dev_open(hdev->id) < 0)
1092 return;
1093
1094 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1095 schedule_delayed_work(&hdev->power_off,
1096 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1097
1098 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1099 mgmt_index_added(hdev);
1100 }
1101
1102 static void hci_power_off(struct work_struct *work)
1103 {
1104 struct hci_dev *hdev = container_of(work, struct hci_dev,
1105 power_off.work);
1106
1107 BT_DBG("%s", hdev->name);
1108
1109 hci_dev_do_close(hdev);
1110 }
1111
1112 static void hci_discov_off(struct work_struct *work)
1113 {
1114 struct hci_dev *hdev;
1115 u8 scan = SCAN_PAGE;
1116
1117 hdev = container_of(work, struct hci_dev, discov_off.work);
1118
1119 BT_DBG("%s", hdev->name);
1120
1121 hci_dev_lock(hdev);
1122
1123 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1124
1125 hdev->discov_timeout = 0;
1126
1127 hci_dev_unlock(hdev);
1128 }
1129
1130 int hci_uuids_clear(struct hci_dev *hdev)
1131 {
1132 struct list_head *p, *n;
1133
1134 list_for_each_safe(p, n, &hdev->uuids) {
1135 struct bt_uuid *uuid;
1136
1137 uuid = list_entry(p, struct bt_uuid, list);
1138
1139 list_del(p);
1140 kfree(uuid);
1141 }
1142
1143 return 0;
1144 }
1145
1146 int hci_link_keys_clear(struct hci_dev *hdev)
1147 {
1148 struct list_head *p, *n;
1149
1150 list_for_each_safe(p, n, &hdev->link_keys) {
1151 struct link_key *key;
1152
1153 key = list_entry(p, struct link_key, list);
1154
1155 list_del(p);
1156 kfree(key);
1157 }
1158
1159 return 0;
1160 }
1161
1162 int hci_smp_ltks_clear(struct hci_dev *hdev)
1163 {
1164 struct smp_ltk *k, *tmp;
1165
1166 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1167 list_del(&k->list);
1168 kfree(k);
1169 }
1170
1171 return 0;
1172 }
1173
1174 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1175 {
1176 struct link_key *k;
1177
1178 list_for_each_entry(k, &hdev->link_keys, list)
1179 if (bacmp(bdaddr, &k->bdaddr) == 0)
1180 return k;
1181
1182 return NULL;
1183 }
1184
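/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the bonding requirements
 * of both sides of the connection. */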
1185 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1186 u8 key_type, u8 old_key_type)
1187 {
1188 /* Legacy key */
1189 if (key_type < 0x03)
1190 return 1;
1191
1192 /* Debug keys are insecure so don't store them persistently */
1193 if (key_type == HCI_LK_DEBUG_COMBINATION)
1194 return 0;
1195
1196 /* Changed combination key and there's no previous one */
1197 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1198 return 0;
1199
1200 /* Security mode 3 case */
1201 if (!conn)
1202 return 1;
1203
1204 /* Neither local nor remote side had no-bonding as requirement */
1205 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1206 return 1;
1207
1208 /* Local side had dedicated bonding as requirement */
1209 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1210 return 1;
1211
1212 /* Remote side had dedicated bonding as requirement */
1213 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1214 return 1;
1215
1216 /* If none of the above criteria match, then don't store the key
1217 * persistently */
1218 return 0;
1219 }
1220
1221 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1222 {
1223 struct smp_ltk *k;
1224
1225 list_for_each_entry(k, &hdev->long_term_keys, list) {
1226 if (k->ediv != ediv ||
1227 memcmp(rand, k->rand, sizeof(k->rand)))
1228 continue;
1229
1230 return k;
1231 }
1232
1233 return NULL;
1234 }
1235 EXPORT_SYMBOL(hci_find_ltk);
1236
1237 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1238 u8 addr_type)
1239 {
1240 struct smp_ltk *k;
1241
1242 list_for_each_entry(k, &hdev->long_term_keys, list)
1243 if (addr_type == k->bdaddr_type &&
1244 bacmp(bdaddr, &k->bdaddr) == 0)
1245 return k;
1246
1247 return NULL;
1248 }
1249 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1250
1251 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1252 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1253 {
1254 struct link_key *key, *old_key;
1255 u8 old_key_type, persistent;
1256
1257 old_key = hci_find_link_key(hdev, bdaddr);
1258 if (old_key) {
1259 old_key_type = old_key->type;
1260 key = old_key;
1261 } else {
1262 old_key_type = conn ? conn->key_type : 0xff;
1263 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1264 if (!key)
1265 return -ENOMEM;
1266 list_add(&key->list, &hdev->link_keys);
1267 }
1268
1269 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1270
1271 /* Some buggy controller combinations generate a changed
1272 * combination key for legacy pairing even when there's no
1273 * previous key */
1274 if (type == HCI_LK_CHANGED_COMBINATION &&
1275 (!conn || conn->remote_auth == 0xff) &&
1276 old_key_type == 0xff) {
1277 type = HCI_LK_COMBINATION;
1278 if (conn)
1279 conn->key_type = type;
1280 }
1281
1282 bacpy(&key->bdaddr, bdaddr);
1283 memcpy(key->val, val, 16);
1284 key->pin_len = pin_len;
1285
1286 if (type == HCI_LK_CHANGED_COMBINATION)
1287 key->type = old_key_type;
1288 else
1289 key->type = type;
1290
1291 if (!new_key)
1292 return 0;
1293
1294 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1295
1296 mgmt_new_link_key(hdev, key, persistent);
1297
1298 if (!persistent) {
1299 list_del(&key->list);
1300 kfree(key);
1301 }
1302
1303 return 0;
1304 }
1305
1306 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1307 int new_key, u8 authenticated, u8 tk[16],
1308 u8 enc_size, u16 ediv, u8 rand[8])
1309 {
1310 struct smp_ltk *key, *old_key;
1311
1312 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1313 return 0;
1314
1315 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1316 if (old_key)
1317 key = old_key;
1318 else {
1319 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1320 if (!key)
1321 return -ENOMEM;
1322 list_add(&key->list, &hdev->long_term_keys);
1323 }
1324
1325 bacpy(&key->bdaddr, bdaddr);
1326 key->bdaddr_type = addr_type;
1327 memcpy(key->val, tk, sizeof(key->val));
1328 key->authenticated = authenticated;
1329 key->ediv = ediv;
1330 key->enc_size = enc_size;
1331 key->type = type;
1332 memcpy(key->rand, rand, sizeof(key->rand));
1333
1334 if (!new_key)
1335 return 0;
1336
1337 if (type & HCI_SMP_LTK)
1338 mgmt_new_ltk(hdev, key, 1);
1339
1340 return 0;
1341 }
1342
1343 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1344 {
1345 struct link_key *key;
1346
1347 key = hci_find_link_key(hdev, bdaddr);
1348 if (!key)
1349 return -ENOENT;
1350
1351 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1352
1353 list_del(&key->list);
1354 kfree(key);
1355
1356 return 0;
1357 }
1358
1359 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1360 {
1361 struct smp_ltk *k, *tmp;
1362
1363 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1364 if (bacmp(bdaddr, &k->bdaddr))
1365 continue;
1366
1367 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1368
1369 list_del(&k->list);
1370 kfree(k);
1371 }
1372
1373 return 0;
1374 }
1375
1376 /* HCI command timer function */
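/* Fires when a sent command got no completion in time: restore the
 * command credit and kick the command work so the queue can drain. */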
1377 static void hci_cmd_timer(unsigned long arg)
1378 {
1379 struct hci_dev *hdev = (void *) arg;
1380
1381 BT_ERR("%s command tx timeout", hdev->name);
1382 atomic_set(&hdev->cmd_cnt, 1);
1383 queue_work(hdev->workqueue, &hdev->cmd_work);
1384 }
1385
1386 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1387 bdaddr_t *bdaddr)
1388 {
1389 struct oob_data *data;
1390
1391 list_for_each_entry(data, &hdev->remote_oob_data, list)
1392 if (bacmp(bdaddr, &data->bdaddr) == 0)
1393 return data;
1394
1395 return NULL;
1396 }
1397
1398 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399 {
1400 struct oob_data *data;
1401
1402 data = hci_find_remote_oob_data(hdev, bdaddr);
1403 if (!data)
1404 return -ENOENT;
1405
1406 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1407
1408 list_del(&data->list);
1409 kfree(data);
1410
1411 return 0;
1412 }
1413
1414 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1415 {
1416 struct oob_data *data, *n;
1417
1418 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1419 list_del(&data->list);
1420 kfree(data);
1421 }
1422
1423 return 0;
1424 }
1425
1426 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1427 u8 *randomizer)
1428 {
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432
1433 if (!data) {
1434 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1435 if (!data)
1436 return -ENOMEM;
1437
1438 bacpy(&data->bdaddr, bdaddr);
1439 list_add(&data->list, &hdev->remote_oob_data);
1440 }
1441
1442 memcpy(data->hash, hash, sizeof(data->hash));
1443 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1444
1445 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1446
1447 return 0;
1448 }
1449
1450 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1451 bdaddr_t *bdaddr)
1452 {
1453 struct bdaddr_list *b;
1454
1455 list_for_each_entry(b, &hdev->blacklist, list)
1456 if (bacmp(bdaddr, &b->bdaddr) == 0)
1457 return b;
1458
1459 return NULL;
1460 }
1461
1462 int hci_blacklist_clear(struct hci_dev *hdev)
1463 {
1464 struct list_head *p, *n;
1465
1466 list_for_each_safe(p, n, &hdev->blacklist) {
1467 struct bdaddr_list *b;
1468
1469 b = list_entry(p, struct bdaddr_list, list);
1470
1471 list_del(p);
1472 kfree(b);
1473 }
1474
1475 return 0;
1476 }
1477
1478 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1479 {
1480 struct bdaddr_list *entry;
1481
1482 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1483 return -EBADF;
1484
1485 if (hci_blacklist_lookup(hdev, bdaddr))
1486 return -EEXIST;
1487
1488 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1489 if (!entry)
1490 return -ENOMEM;
1491
1492 bacpy(&entry->bdaddr, bdaddr);
1493
1494 list_add(&entry->list, &hdev->blacklist);
1495
1496 return mgmt_device_blocked(hdev, bdaddr, type);
1497 }
1498
1499 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1500 {
1501 struct bdaddr_list *entry;
1502
1503 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1504 return hci_blacklist_clear(hdev);
1505
1506 entry = hci_blacklist_lookup(hdev, bdaddr);
1507 if (!entry)
1508 return -ENOENT;
1509
1510 list_del(&entry->list);
1511 kfree(entry);
1512
1513 return mgmt_device_unblocked(hdev, bdaddr, type);
1514 }
1515
1516 static void hci_clear_adv_cache(struct work_struct *work)
1517 {
1518 struct hci_dev *hdev = container_of(work, struct hci_dev,
1519 adv_work.work);
1520
1521 hci_dev_lock(hdev);
1522
1523 hci_adv_entries_clear(hdev);
1524
1525 hci_dev_unlock(hdev);
1526 }
1527
1528 int hci_adv_entries_clear(struct hci_dev *hdev)
1529 {
1530 struct adv_entry *entry, *tmp;
1531
1532 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1533 list_del(&entry->list);
1534 kfree(entry);
1535 }
1536
1537 BT_DBG("%s adv cache cleared", hdev->name);
1538
1539 return 0;
1540 }
1541
1542 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1543 {
1544 struct adv_entry *entry;
1545
1546 list_for_each_entry(entry, &hdev->adv_entries, list)
1547 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1548 return entry;
1549
1550 return NULL;
1551 }
1552
1553 static inline int is_connectable_adv(u8 evt_type)
1554 {
1555 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1556 return 1;
1557
1558 return 0;
1559 }
1560
1561 int hci_add_adv_entry(struct hci_dev *hdev,
1562 struct hci_ev_le_advertising_info *ev)
1563 {
1564 struct adv_entry *entry;
1565
1566 if (!is_connectable_adv(ev->evt_type))
1567 return -EINVAL;
1568
1569 /* Only new entries should be added to adv_entries. So, if
1570 * bdaddr was found, don't add it. */
1571 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1572 return 0;
1573
1574 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1575 if (!entry)
1576 return -ENOMEM;
1577
1578 bacpy(&entry->bdaddr, &ev->bdaddr);
1579 entry->bdaddr_type = ev->bdaddr_type;
1580
1581 list_add(&entry->list, &hdev->adv_entries);
1582
1583 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1584 batostr(&entry->bdaddr), entry->bdaddr_type);
1585
1586 return 0;
1587 }
1588
1589 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1590 {
1591 struct le_scan_params *param = (struct le_scan_params *) opt;
1592 struct hci_cp_le_set_scan_param cp;
1593
1594 memset(&cp, 0, sizeof(cp));
1595 cp.type = param->type;
1596 cp.interval = cpu_to_le16(param->interval);
1597 cp.window = cpu_to_le16(param->window);
1598
1599 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1600 }
1601
1602 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1603 {
1604 struct hci_cp_le_set_scan_enable cp;
1605
1606 memset(&cp, 0, sizeof(cp));
1607 cp.enable = 1;
1608
1609 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1610 }
1611
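/* Set the LE scan parameters, enable scanning, and schedule the
 * delayed work that will disable the scan again after @timeout ms. */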
1612 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1613 u16 window, int timeout)
1614 {
1615 long timeo = msecs_to_jiffies(3000);
1616 struct le_scan_params param;
1617 int err;
1618
1619 BT_DBG("%s", hdev->name);
1620
1621 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1622 return -EINPROGRESS;
1623
1624 param.type = type;
1625 param.interval = interval;
1626 param.window = window;
1627
1628 hci_req_lock(hdev);
1629
1630 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1631 timeo);
1632 if (!err)
1633 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1634
1635 hci_req_unlock(hdev);
1636
1637 if (err < 0)
1638 return err;
1639
1640 schedule_delayed_work(&hdev->le_scan_disable,
1641 msecs_to_jiffies(timeout));
1642
1643 return 0;
1644 }
1645
1646 static void le_scan_disable_work(struct work_struct *work)
1647 {
1648 struct hci_dev *hdev = container_of(work, struct hci_dev,
1649 le_scan_disable.work);
1650 struct hci_cp_le_set_scan_enable cp;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 memset(&cp, 0, sizeof(cp));
1655
1656 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1657 }
1658
1659 static void le_scan_work(struct work_struct *work)
1660 {
1661 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1662 struct le_scan_params *param = &hdev->le_scan_params;
1663
1664 BT_DBG("%s", hdev->name);
1665
1666 hci_do_le_scan(hdev, param->type, param->interval,
1667 param->window, param->timeout);
1668 }
1669
1670 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1671 int timeout)
1672 {
1673 struct le_scan_params *param = &hdev->le_scan_params;
1674
1675 BT_DBG("%s", hdev->name);
1676
1677 if (work_busy(&hdev->le_scan))
1678 return -EINPROGRESS;
1679
1680 param->type = type;
1681 param->interval = interval;
1682 param->window = window;
1683 param->timeout = timeout;
1684
1685 queue_work(system_long_wq, &hdev->le_scan);
1686
1687 return 0;
1688 }
1689
1690 /* Register HCI device */
1691 int hci_register_dev(struct hci_dev *hdev)
1692 {
1693 struct list_head *head = &hci_dev_list, *p;
1694 int i, id, error;
1695
1696 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1697
1698 if (!hdev->open || !hdev->close)
1699 return -EINVAL;
1700
1701 /* Do not allow HCI_AMP devices to register at index 0,
1702 * so the index can be used as the AMP controller ID.
1703 */
1704 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1705
1706 write_lock(&hci_dev_list_lock);
1707
1708 /* Find first available device id */
1709 list_for_each(p, &hci_dev_list) {
1710 if (list_entry(p, struct hci_dev, list)->id != id)
1711 break;
1712 head = p; id++;
1713 }
1714
1715 sprintf(hdev->name, "hci%d", id);
1716 hdev->id = id;
1717 list_add_tail(&hdev->list, head);
1718
1719 mutex_init(&hdev->lock);
1720
1721 hdev->flags = 0;
1722 hdev->dev_flags = 0;
1723 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1724 hdev->esco_type = (ESCO_HV1);
1725 hdev->link_mode = (HCI_LM_ACCEPT);
1726 hdev->io_capability = 0x03; /* No Input No Output */
1727
1728 hdev->idle_timeout = 0;
1729 hdev->sniff_max_interval = 800;
1730 hdev->sniff_min_interval = 80;
1731
1732 INIT_WORK(&hdev->rx_work, hci_rx_work);
1733 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1734 INIT_WORK(&hdev->tx_work, hci_tx_work);
1735
1736
1737 skb_queue_head_init(&hdev->rx_q);
1738 skb_queue_head_init(&hdev->cmd_q);
1739 skb_queue_head_init(&hdev->raw_q);
1740
1741 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1742
1743 for (i = 0; i < NUM_REASSEMBLY; i++)
1744 hdev->reassembly[i] = NULL;
1745
1746 init_waitqueue_head(&hdev->req_wait_q);
1747 mutex_init(&hdev->req_lock);
1748
1749 discovery_init(hdev);
1750
1751 hci_conn_hash_init(hdev);
1752
1753 INIT_LIST_HEAD(&hdev->mgmt_pending);
1754
1755 INIT_LIST_HEAD(&hdev->blacklist);
1756
1757 INIT_LIST_HEAD(&hdev->uuids);
1758
1759 INIT_LIST_HEAD(&hdev->link_keys);
1760 INIT_LIST_HEAD(&hdev->long_term_keys);
1761
1762 INIT_LIST_HEAD(&hdev->remote_oob_data);
1763
1764 INIT_LIST_HEAD(&hdev->adv_entries);
1765
1766 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1767 INIT_WORK(&hdev->power_on, hci_power_on);
1768 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1769
1770 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1771
1772 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1773
1774 atomic_set(&hdev->promisc, 0);
1775
1776 INIT_WORK(&hdev->le_scan, le_scan_work);
1777
1778 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1779
1780 write_unlock(&hci_dev_list_lock);
1781
1782 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1783 WQ_MEM_RECLAIM, 1);
1784 if (!hdev->workqueue) {
1785 error = -ENOMEM;
1786 goto err;
1787 }
1788
1789 error = hci_add_sysfs(hdev);
1790 if (error < 0)
1791 goto err_wqueue;
1792
1793 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1794 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1795 if (hdev->rfkill) {
1796 if (rfkill_register(hdev->rfkill) < 0) {
1797 rfkill_destroy(hdev->rfkill);
1798 hdev->rfkill = NULL;
1799 }
1800 }
1801
1802 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1803 set_bit(HCI_SETUP, &hdev->dev_flags);
1804 schedule_work(&hdev->power_on);
1805
1806 hci_notify(hdev, HCI_DEV_REG);
1807 hci_dev_hold(hdev);
1808
1809 return id;
1810
1811 err_wqueue:
1812 destroy_workqueue(hdev->workqueue);
1813 err:
1814 write_lock(&hci_dev_list_lock);
1815 list_del(&hdev->list);
1816 write_unlock(&hci_dev_list_lock);
1817
1818 return error;
1819 }
1820 EXPORT_SYMBOL(hci_register_dev);
1821
1822 /* Unregister HCI device */
1823 void hci_unregister_dev(struct hci_dev *hdev)
1824 {
1825 int i;
1826
1827 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1828
1829 write_lock(&hci_dev_list_lock);
1830 list_del(&hdev->list);
1831 write_unlock(&hci_dev_list_lock);
1832
1833 hci_dev_do_close(hdev);
1834
1835 for (i = 0; i < NUM_REASSEMBLY; i++)
1836 kfree_skb(hdev->reassembly[i]);
1837
1838 if (!test_bit(HCI_INIT, &hdev->flags) &&
1839 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1840 hci_dev_lock(hdev);
1841 mgmt_index_removed(hdev);
1842 hci_dev_unlock(hdev);
1843 }
1844
1845 /* mgmt_index_removed should take care of emptying the
1846 * pending list */
1847 BUG_ON(!list_empty(&hdev->mgmt_pending));
1848
1849 hci_notify(hdev, HCI_DEV_UNREG);
1850
1851 if (hdev->rfkill) {
1852 rfkill_unregister(hdev->rfkill);
1853 rfkill_destroy(hdev->rfkill);
1854 }
1855
1856 hci_del_sysfs(hdev);
1857
1858 cancel_delayed_work_sync(&hdev->adv_work);
1859
1860 destroy_workqueue(hdev->workqueue);
1861
1862 hci_dev_lock(hdev);
1863 hci_blacklist_clear(hdev);
1864 hci_uuids_clear(hdev);
1865 hci_link_keys_clear(hdev);
1866 hci_smp_ltks_clear(hdev);
1867 hci_remote_oob_data_clear(hdev);
1868 hci_adv_entries_clear(hdev);
1869 hci_dev_unlock(hdev);
1870
1871 hci_dev_put(hdev);
1872 }
1873 EXPORT_SYMBOL(hci_unregister_dev);
1874
1875 /* Suspend HCI device */
1876 int hci_suspend_dev(struct hci_dev *hdev)
1877 {
1878 hci_notify(hdev, HCI_DEV_SUSPEND);
1879 return 0;
1880 }
1881 EXPORT_SYMBOL(hci_suspend_dev);
1882
1883 /* Resume HCI device */
1884 int hci_resume_dev(struct hci_dev *hdev)
1885 {
1886 hci_notify(hdev, HCI_DEV_RESUME);
1887 return 0;
1888 }
1889 EXPORT_SYMBOL(hci_resume_dev);
1890
1891 /* Receive frame from HCI drivers */
1892 int hci_recv_frame(struct sk_buff *skb)
1893 {
1894 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1895 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1896 && !test_bit(HCI_INIT, &hdev->flags))) {
1897 kfree_skb(skb);
1898 return -ENXIO;
1899 }
1900
1901 	/* Incoming skb */
1902 bt_cb(skb)->incoming = 1;
1903
1904 /* Time stamp */
1905 __net_timestamp(skb);
1906
1907 skb_queue_tail(&hdev->rx_q, skb);
1908 queue_work(hdev->workqueue, &hdev->rx_work);
1909
1910 return 0;
1911 }
1912 EXPORT_SYMBOL(hci_recv_frame);
1913
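/* Reassemble an HCI packet of the given type from a driver-provided
 * byte stream into hdev->reassembly[index]; complete frames are passed
 * to hci_recv_frame(). Returns the number of bytes left unprocessed,
 * or a negative error. */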
1914 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1915 int count, __u8 index)
1916 {
1917 int len = 0;
1918 int hlen = 0;
1919 int remain = count;
1920 struct sk_buff *skb;
1921 struct bt_skb_cb *scb;
1922
1923 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1924 index >= NUM_REASSEMBLY)
1925 return -EILSEQ;
1926
1927 skb = hdev->reassembly[index];
1928
1929 if (!skb) {
1930 switch (type) {
1931 case HCI_ACLDATA_PKT:
1932 len = HCI_MAX_FRAME_SIZE;
1933 hlen = HCI_ACL_HDR_SIZE;
1934 break;
1935 case HCI_EVENT_PKT:
1936 len = HCI_MAX_EVENT_SIZE;
1937 hlen = HCI_EVENT_HDR_SIZE;
1938 break;
1939 case HCI_SCODATA_PKT:
1940 len = HCI_MAX_SCO_SIZE;
1941 hlen = HCI_SCO_HDR_SIZE;
1942 break;
1943 }
1944
1945 skb = bt_skb_alloc(len, GFP_ATOMIC);
1946 if (!skb)
1947 return -ENOMEM;
1948
1949 scb = (void *) skb->cb;
1950 scb->expect = hlen;
1951 scb->pkt_type = type;
1952
1953 skb->dev = (void *) hdev;
1954 hdev->reassembly[index] = skb;
1955 }
1956
1957 while (count) {
1958 scb = (void *) skb->cb;
1959 len = min(scb->expect, (__u16)count);
1960
1961 memcpy(skb_put(skb, len), data, len);
1962
1963 count -= len;
1964 data += len;
1965 scb->expect -= len;
1966 remain = count;
1967
1968 switch (type) {
1969 case HCI_EVENT_PKT:
1970 if (skb->len == HCI_EVENT_HDR_SIZE) {
1971 struct hci_event_hdr *h = hci_event_hdr(skb);
1972 scb->expect = h->plen;
1973
1974 if (skb_tailroom(skb) < scb->expect) {
1975 kfree_skb(skb);
1976 hdev->reassembly[index] = NULL;
1977 return -ENOMEM;
1978 }
1979 }
1980 break;
1981
1982 case HCI_ACLDATA_PKT:
1983 if (skb->len == HCI_ACL_HDR_SIZE) {
1984 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1985 scb->expect = __le16_to_cpu(h->dlen);
1986
1987 if (skb_tailroom(skb) < scb->expect) {
1988 kfree_skb(skb);
1989 hdev->reassembly[index] = NULL;
1990 return -ENOMEM;
1991 }
1992 }
1993 break;
1994
1995 case HCI_SCODATA_PKT:
1996 if (skb->len == HCI_SCO_HDR_SIZE) {
1997 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1998 scb->expect = h->dlen;
1999
2000 if (skb_tailroom(skb) < scb->expect) {
2001 kfree_skb(skb);
2002 hdev->reassembly[index] = NULL;
2003 return -ENOMEM;
2004 }
2005 }
2006 break;
2007 }
2008
2009 if (scb->expect == 0) {
2010 /* Complete frame */
2011
2012 bt_cb(skb)->pkt_type = type;
2013 hci_recv_frame(skb);
2014
2015 hdev->reassembly[index] = NULL;
2016 return remain;
2017 }
2018 }
2019
2020 return remain;
2021 }
2022
2023 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2024 {
2025 int rem = 0;
2026
2027 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2028 return -EILSEQ;
2029
2030 while (count) {
2031 rem = hci_reassembly(hdev, type, data, count, type - 1);
2032 if (rem < 0)
2033 return rem;
2034
2035 data += (count - rem);
2036 count = rem;
2037 }
2038
2039 return rem;
2040 }
2041 EXPORT_SYMBOL(hci_recv_fragment);
2042
2043 #define STREAM_REASSEMBLY 0
2044
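/* Reassembly for byte-stream transports where every frame starts with
 * a one-byte packet type indicator (uses slot STREAM_REASSEMBLY). */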
2045 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2046 {
2047 int type;
2048 int rem = 0;
2049
2050 while (count) {
2051 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2052
2053 if (!skb) {
2054 struct { char type; } *pkt;
2055
2056 /* Start of the frame */
2057 pkt = data;
2058 type = pkt->type;
2059
2060 data++;
2061 count--;
2062 } else
2063 type = bt_cb(skb)->pkt_type;
2064
2065 rem = hci_reassembly(hdev, type, data, count,
2066 STREAM_REASSEMBLY);
2067 if (rem < 0)
2068 return rem;
2069
2070 data += (count - rem);
2071 count = rem;
2072 }
2073
2074 return rem;
2075 }
2076 EXPORT_SYMBOL(hci_recv_stream_fragment);
2077
2078 /* ---- Interface to upper protocols ---- */
2079
2080 int hci_register_cb(struct hci_cb *cb)
2081 {
2082 BT_DBG("%p name %s", cb, cb->name);
2083
2084 write_lock(&hci_cb_list_lock);
2085 list_add(&cb->list, &hci_cb_list);
2086 write_unlock(&hci_cb_list_lock);
2087
2088 return 0;
2089 }
2090 EXPORT_SYMBOL(hci_register_cb);
2091
2092 int hci_unregister_cb(struct hci_cb *cb)
2093 {
2094 BT_DBG("%p name %s", cb, cb->name);
2095
2096 write_lock(&hci_cb_list_lock);
2097 list_del(&cb->list);
2098 write_unlock(&hci_cb_list_lock);
2099
2100 return 0;
2101 }
2102 EXPORT_SYMBOL(hci_unregister_cb);
2103
2104 static int hci_send_frame(struct sk_buff *skb)
2105 {
2106 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2107
2108 if (!hdev) {
2109 kfree_skb(skb);
2110 return -ENODEV;
2111 }
2112
2113 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2114
2115 /* Time stamp */
2116 __net_timestamp(skb);
2117
2118 /* Send copy to monitor */
2119 hci_send_to_monitor(hdev, skb);
2120
2121 if (atomic_read(&hdev->promisc)) {
2122 /* Send copy to the sockets */
2123 hci_send_to_sock(hdev, skb);
2124 }
2125
2126 /* Get rid of skb owner, prior to sending to the driver. */
2127 skb_orphan(skb);
2128
2129 return hdev->send(skb);
2130 }
2131
2132 /* Send HCI command */
2133 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2134 {
2135 int len = HCI_COMMAND_HDR_SIZE + plen;
2136 struct hci_command_hdr *hdr;
2137 struct sk_buff *skb;
2138
2139 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2140
2141 skb = bt_skb_alloc(len, GFP_ATOMIC);
2142 if (!skb) {
2143 BT_ERR("%s no memory for command", hdev->name);
2144 return -ENOMEM;
2145 }
2146
2147 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2148 hdr->opcode = cpu_to_le16(opcode);
2149 hdr->plen = plen;
2150
2151 if (plen)
2152 memcpy(skb_put(skb, plen), param, plen);
2153
2154 BT_DBG("skb len %d", skb->len);
2155
2156 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2157 skb->dev = (void *) hdev;
2158
2159 if (test_bit(HCI_INIT, &hdev->flags))
2160 hdev->init_last_cmd = opcode;
2161
2162 skb_queue_tail(&hdev->cmd_q, skb);
2163 queue_work(hdev->workqueue, &hdev->cmd_work);
2164
2165 return 0;
2166 }
2167
2168 /* Get data from the previously sent command */
2169 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2170 {
2171 struct hci_command_hdr *hdr;
2172
2173 if (!hdev->sent_cmd)
2174 return NULL;
2175
2176 hdr = (void *) hdev->sent_cmd->data;
2177
2178 if (hdr->opcode != cpu_to_le16(opcode))
2179 return NULL;
2180
2181 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2182
2183 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2184 }
2185
2186 /* Send ACL data */
2187 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2188 {
2189 struct hci_acl_hdr *hdr;
2190 int len = skb->len;
2191
2192 skb_push(skb, HCI_ACL_HDR_SIZE);
2193 skb_reset_transport_header(skb);
2194 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2195 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2196 hdr->dlen = cpu_to_le16(len);
2197 }
2198
2199 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2200 struct sk_buff *skb, __u16 flags)
2201 {
2202 struct hci_dev *hdev = conn->hdev;
2203 struct sk_buff *list;
2204
2205 list = skb_shinfo(skb)->frag_list;
2206 if (!list) {
2207 /* Non fragmented */
2208 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2209
2210 skb_queue_tail(queue, skb);
2211 } else {
2212 /* Fragmented */
2213 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2214
2215 skb_shinfo(skb)->frag_list = NULL;
2216
2217 /* Queue all fragments atomically */
2218 spin_lock(&queue->lock);
2219
2220 __skb_queue_tail(queue, skb);
2221
2222 flags &= ~ACL_START;
2223 flags |= ACL_CONT;
2224 do {
2225 skb = list; list = list->next;
2226
2227 skb->dev = (void *) hdev;
2228 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2229 hci_add_acl_hdr(skb, conn->handle, flags);
2230
2231 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2232
2233 __skb_queue_tail(queue, skb);
2234 } while (list);
2235
2236 spin_unlock(&queue->lock);
2237 }
2238 }
2239
2240 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2241 {
2242 struct hci_conn *conn = chan->conn;
2243 struct hci_dev *hdev = conn->hdev;
2244
2245 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2246
2247 skb->dev = (void *) hdev;
2248 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2249 hci_add_acl_hdr(skb, conn->handle, flags);
2250
2251 hci_queue_acl(conn, &chan->data_q, skb, flags);
2252
2253 queue_work(hdev->workqueue, &hdev->tx_work);
2254 }
2255 EXPORT_SYMBOL(hci_send_acl);
2256
2257 /* Send SCO data */
2258 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2259 {
2260 struct hci_dev *hdev = conn->hdev;
2261 struct hci_sco_hdr hdr;
2262
2263 BT_DBG("%s len %d", hdev->name, skb->len);
2264
2265 hdr.handle = cpu_to_le16(conn->handle);
2266 hdr.dlen = skb->len;
2267
2268 skb_push(skb, HCI_SCO_HDR_SIZE);
2269 skb_reset_transport_header(skb);
2270 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2271
2272 skb->dev = (void *) hdev;
2273 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2274
2275 skb_queue_tail(&conn->data_q, skb);
2276 queue_work(hdev->workqueue, &hdev->tx_work);
2277 }
2278 EXPORT_SYMBOL(hci_send_sco);
2279
2280 /* ---- HCI TX task (outgoing data) ---- */
2281
2282 /* HCI Connection scheduler */
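/* Pick the connection of the given type that has data queued and the
 * fewest packets in flight, and compute a fair per-connection quote
 * from the controller's free buffer count. */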
2283 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2284 {
2285 struct hci_conn_hash *h = &hdev->conn_hash;
2286 struct hci_conn *conn = NULL, *c;
2287 int num = 0, min = ~0;
2288
2289 /* We don't have to lock the device here. Connections are always
2290 * added and removed with the TX task disabled. */
2291
2292 rcu_read_lock();
2293
2294 list_for_each_entry_rcu(c, &h->list, list) {
2295 if (c->type != type || skb_queue_empty(&c->data_q))
2296 continue;
2297
2298 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2299 continue;
2300
2301 num++;
2302
2303 if (c->sent < min) {
2304 min = c->sent;
2305 conn = c;
2306 }
2307
2308 if (hci_conn_num(hdev, type) == num)
2309 break;
2310 }
2311
2312 rcu_read_unlock();
2313
2314 if (conn) {
2315 int cnt, q;
2316
2317 switch (conn->type) {
2318 case ACL_LINK:
2319 cnt = hdev->acl_cnt;
2320 break;
2321 case SCO_LINK:
2322 case ESCO_LINK:
2323 cnt = hdev->sco_cnt;
2324 break;
2325 case LE_LINK:
2326 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2327 break;
2328 default:
2329 cnt = 0;
2330 BT_ERR("Unknown link type");
2331 }
2332
2333 q = cnt / num;
2334 *quote = q ? q : 1;
2335 } else
2336 *quote = 0;
2337
2338 BT_DBG("conn %p quote %d", conn, *quote);
2339 return conn;
2340 }
2341
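/*
 * TX timeout handling: called when no buffer credits have come back for
 * longer than the ACL/LE TX timeout. Assume the link is dead and disconnect
 * every connection of that type which still has unacknowledged packets,
 * using reason 0x13 (remote user terminated connection).
 */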
2342 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2343 {
2344 struct hci_conn_hash *h = &hdev->conn_hash;
2345 struct hci_conn *c;
2346
2347 BT_ERR("%s link tx timeout", hdev->name);
2348
2349 rcu_read_lock();
2350
2351 /* Kill stalled connections */
2352 list_for_each_entry_rcu(c, &h->list, list) {
2353 if (c->type == type && c->sent) {
2354 BT_ERR("%s killing stalled connection %s",
2355 hdev->name, batostr(&c->dst));
2356 hci_acl_disconn(c, 0x13);
2357 }
2358 }
2359
2360 rcu_read_unlock();
2361 }
2362
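/*
 * Channel-level scheduler: walk all connections of the given type and
 * consider only channels whose head-of-queue skb carries the highest
 * priority seen so far; among those, prefer the channel whose connection has
 * the fewest packets in flight. The returned quota is the free buffer count
 * divided by the number of competing channels (at least 1).
 */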
2363 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2364 int *quote)
2365 {
2366 struct hci_conn_hash *h = &hdev->conn_hash;
2367 struct hci_chan *chan = NULL;
2368 int num = 0, min = ~0, cur_prio = 0;
2369 struct hci_conn *conn;
2370 int cnt, q, conn_num = 0;
2371
2372 BT_DBG("%s", hdev->name);
2373
2374 rcu_read_lock();
2375
2376 list_for_each_entry_rcu(conn, &h->list, list) {
2377 struct hci_chan *tmp;
2378
2379 if (conn->type != type)
2380 continue;
2381
2382 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2383 continue;
2384
2385 conn_num++;
2386
2387 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2388 struct sk_buff *skb;
2389
2390 if (skb_queue_empty(&tmp->data_q))
2391 continue;
2392
2393 skb = skb_peek(&tmp->data_q);
2394 if (skb->priority < cur_prio)
2395 continue;
2396
2397 if (skb->priority > cur_prio) {
2398 num = 0;
2399 min = ~0;
2400 cur_prio = skb->priority;
2401 }
2402
2403 num++;
2404
2405 if (conn->sent < min) {
2406 min = conn->sent;
2407 chan = tmp;
2408 }
2409 }
2410
2411 if (hci_conn_num(hdev, type) == conn_num)
2412 break;
2413 }
2414
2415 rcu_read_unlock();
2416
2417 if (!chan)
2418 return NULL;
2419
2420 switch (chan->conn->type) {
2421 case ACL_LINK:
2422 cnt = hdev->acl_cnt;
2423 break;
2424 case SCO_LINK:
2425 case ESCO_LINK:
2426 cnt = hdev->sco_cnt;
2427 break;
2428 case LE_LINK:
2429 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2430 break;
2431 default:
2432 cnt = 0;
2433 BT_ERR("Unknown link type");
2434 }
2435
2436 q = cnt / num;
2437 *quote = q ? q : 1;
2438 BT_DBG("chan %p quote %d", chan, *quote);
2439 return chan;
2440 }
2441
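/*
 * Anti-starvation pass run after a scheduling round that consumed buffers:
 * channels that just transmitted get their per-round sent counter cleared,
 * while channels that still have queued data but did not transmit get the
 * priority of their head skb promoted to HCI_PRIO_MAX - 1 so they win the
 * next round.
 */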
2442 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2443 {
2444 struct hci_conn_hash *h = &hdev->conn_hash;
2445 struct hci_conn *conn;
2446 int num = 0;
2447
2448 BT_DBG("%s", hdev->name);
2449
2450 rcu_read_lock();
2451
2452 list_for_each_entry_rcu(conn, &h->list, list) {
2453 struct hci_chan *chan;
2454
2455 if (conn->type != type)
2456 continue;
2457
2458 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2459 continue;
2460
2461 num++;
2462
2463 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2464 struct sk_buff *skb;
2465
2466 if (chan->sent) {
2467 chan->sent = 0;
2468 continue;
2469 }
2470
2471 if (skb_queue_empty(&chan->data_q))
2472 continue;
2473
2474 skb = skb_peek(&chan->data_q);
2475 if (skb->priority >= HCI_PRIO_MAX - 1)
2476 continue;
2477
2478 skb->priority = HCI_PRIO_MAX - 1;
2479
2480 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2481 skb->priority);
2482 }
2483
2484 if (hci_conn_num(hdev, type) == num)
2485 break;
2486 }
2487
2488 rcu_read_unlock();
2489
2490 }
2491
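/*
 * Block-based flow control accounting: a packet occupies
 * ceil(payload / block_len) controller buffer blocks, where payload is the
 * skb length minus the ACL header. As an illustration (values assumed, not
 * taken from any particular controller): with block_len = 64, a 300-byte
 * payload consumes DIV_ROUND_UP(300, 64) = 5 blocks.
 */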
2492 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2493 {
2494 /* Calculate count of blocks used by this packet */
2495 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2496 }
2497
2498 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2499 {
2500 if (!test_bit(HCI_RAW, &hdev->flags)) {
2501 /* ACL tx timeout must be longer than maximum
2502 * link supervision timeout (40.9 seconds) */
2503 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2504 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2505 hci_link_tx_to(hdev, ACL_LINK);
2506 }
2507 }
2508
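/*
 * Packet-based ACL scheduling: while the controller reports free ACL
 * buffers, ask hci_chan_sent() for the next channel and its quota, then
 * drain that channel until the quota is used up, the queue empties or a
 * lower-priority skb is reached; each transmitted frame consumes one
 * buffer credit.
 */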
2509 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2510 {
2511 unsigned int cnt = hdev->acl_cnt;
2512 struct hci_chan *chan;
2513 struct sk_buff *skb;
2514 int quote;
2515
2516 __check_timeout(hdev, cnt);
2517
2518 while (hdev->acl_cnt &&
2519 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2520 u32 priority = (skb_peek(&chan->data_q))->priority;
2521 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2522 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2523 skb->len, skb->priority);
2524
2525 /* Stop if priority has changed */
2526 if (skb->priority < priority)
2527 break;
2528
2529 skb = skb_dequeue(&chan->data_q);
2530
2531 hci_conn_enter_active_mode(chan->conn,
2532 bt_cb(skb)->force_active);
2533
2534 hci_send_frame(skb);
2535 hdev->acl_last_tx = jiffies;
2536
2537 hdev->acl_cnt--;
2538 chan->sent++;
2539 chan->conn->sent++;
2540 }
2541 }
2542
2543 if (cnt != hdev->acl_cnt)
2544 hci_prio_recalculate(hdev, ACL_LINK);
2545 }
2546
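/*
 * Block-based ACL scheduling: the same loop as above, but credits are data
 * blocks rather than packets, so each frame debits __get_blocks() from both
 * the device's block count and the channel quota, and the function returns
 * early if the next frame needs more blocks than are currently free.
 */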
2547 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2548 {
2549 unsigned int cnt = hdev->block_cnt;
2550 struct hci_chan *chan;
2551 struct sk_buff *skb;
2552 int quote;
2553
2554 __check_timeout(hdev, cnt);
2555
2556 while (hdev->block_cnt > 0 &&
2557 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2558 u32 priority = (skb_peek(&chan->data_q))->priority;
2559 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2560 int blocks;
2561
2562 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2563 skb->len, skb->priority);
2564
2565 /* Stop if priority has changed */
2566 if (skb->priority < priority)
2567 break;
2568
2569 skb = skb_dequeue(&chan->data_q);
2570
2571 blocks = __get_blocks(hdev, skb);
2572 if (blocks > hdev->block_cnt)
2573 return;
2574
2575 hci_conn_enter_active_mode(chan->conn,
2576 bt_cb(skb)->force_active);
2577
2578 hci_send_frame(skb);
2579 hdev->acl_last_tx = jiffies;
2580
2581 hdev->block_cnt -= blocks;
2582 quote -= blocks;
2583
2584 chan->sent += blocks;
2585 chan->conn->sent += blocks;
2586 }
2587 }
2588
2589 if (cnt != hdev->block_cnt)
2590 hci_prio_recalculate(hdev, ACL_LINK);
2591 }
2592
2593 static inline void hci_sched_acl(struct hci_dev *hdev)
2594 {
2595 BT_DBG("%s", hdev->name);
2596
2597 if (!hci_conn_num(hdev, ACL_LINK))
2598 return;
2599
2600 switch (hdev->flow_ctl_mode) {
2601 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2602 hci_sched_acl_pkt(hdev);
2603 break;
2604
2605 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2606 hci_sched_acl_blk(hdev);
2607 break;
2608 }
2609 }
2610
2611 /* Schedule SCO */
2612 static inline void hci_sched_sco(struct hci_dev *hdev)
2613 {
2614 struct hci_conn *conn;
2615 struct sk_buff *skb;
2616 int quote;
2617
2618 BT_DBG("%s", hdev->name);
2619
2620 if (!hci_conn_num(hdev, SCO_LINK))
2621 return;
2622
2623 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2624 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2625 BT_DBG("skb %p len %d", skb, skb->len);
2626 hci_send_frame(skb);
2627
2628 conn->sent++;
2629 if (conn->sent == ~0)
2630 conn->sent = 0;
2631 }
2632 }
2633 }
2634
2635 static inline void hci_sched_esco(struct hci_dev *hdev)
2636 {
2637 struct hci_conn *conn;
2638 struct sk_buff *skb;
2639 int quote;
2640
2641 BT_DBG("%s", hdev->name);
2642
2643 if (!hci_conn_num(hdev, ESCO_LINK))
2644 return;
2645
2646 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2647 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2648 BT_DBG("skb %p len %d", skb, skb->len);
2649 hci_send_frame(skb);
2650
2651 conn->sent++;
2652 if (conn->sent == ~0)
2653 conn->sent = 0;
2654 }
2655 }
2656 }
2657
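/*
 * LE scheduling mirrors the packet-based ACL case. Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) share the ACL credits, so the
 * remaining count is written back to either le_cnt or acl_cnt at the end of
 * the round.
 */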
2658 static inline void hci_sched_le(struct hci_dev *hdev)
2659 {
2660 struct hci_chan *chan;
2661 struct sk_buff *skb;
2662 int quote, cnt, tmp;
2663
2664 BT_DBG("%s", hdev->name);
2665
2666 if (!hci_conn_num(hdev, LE_LINK))
2667 return;
2668
2669 if (!test_bit(HCI_RAW, &hdev->flags)) {
2670 /* LE tx timeout must be longer than maximum
2671 * link supervision timeout (40.9 seconds) */
2672 if (!hdev->le_cnt && hdev->le_pkts &&
2673 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2674 hci_link_tx_to(hdev, LE_LINK);
2675 }
2676
2677 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2678 tmp = cnt;
2679 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2680 u32 priority = (skb_peek(&chan->data_q))->priority;
2681 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2682 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2683 skb->len, skb->priority);
2684
2685 /* Stop if priority has changed */
2686 if (skb->priority < priority)
2687 break;
2688
2689 skb = skb_dequeue(&chan->data_q);
2690
2691 hci_send_frame(skb);
2692 hdev->le_last_tx = jiffies;
2693
2694 cnt--;
2695 chan->sent++;
2696 chan->conn->sent++;
2697 }
2698 }
2699
2700 if (hdev->le_pkts)
2701 hdev->le_cnt = cnt;
2702 else
2703 hdev->acl_cnt = cnt;
2704
2705 if (cnt != tmp)
2706 hci_prio_recalculate(hdev, LE_LINK);
2707 }
2708
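/*
 * TX work item: run the per-link-type schedulers (ACL, SCO, eSCO, LE) and
 * finally flush the raw packet queue straight to the driver.
 */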
2709 static void hci_tx_work(struct work_struct *work)
2710 {
2711 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2712 struct sk_buff *skb;
2713
2714 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2715 hdev->sco_cnt, hdev->le_cnt);
2716
2717 /* Schedule queues and send stuff to HCI driver */
2718
2719 hci_sched_acl(hdev);
2720
2721 hci_sched_sco(hdev);
2722
2723 hci_sched_esco(hdev);
2724
2725 hci_sched_le(hdev);
2726
2727 /* Send next queued raw (unknown type) packet */
2728 while ((skb = skb_dequeue(&hdev->raw_q)))
2729 hci_send_frame(skb);
2730 }
2731
2732 /* ----- HCI RX task (incoming data processing) ----- */
2733
2734 /* ACL data packet */
2735 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2736 {
2737 struct hci_acl_hdr *hdr = (void *) skb->data;
2738 struct hci_conn *conn;
2739 __u16 handle, flags;
2740
2741 skb_pull(skb, HCI_ACL_HDR_SIZE);
2742
2743 handle = __le16_to_cpu(hdr->handle);
2744 flags = hci_flags(handle);
2745 handle = hci_handle(handle);
2746
2747 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2748
2749 hdev->stat.acl_rx++;
2750
2751 hci_dev_lock(hdev);
2752 conn = hci_conn_hash_lookup_handle(hdev, handle);
2753 hci_dev_unlock(hdev);
2754
2755 if (conn) {
2756 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2757
2758 /* Send to upper protocol */
2759 l2cap_recv_acldata(conn, skb, flags);
2760 return;
2761 } else {
2762 BT_ERR("%s ACL packet for unknown connection handle %d",
2763 hdev->name, handle);
2764 }
2765
2766 kfree_skb(skb);
2767 }
2768
2769 /* SCO data packet */
2770 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2771 {
2772 struct hci_sco_hdr *hdr = (void *) skb->data;
2773 struct hci_conn *conn;
2774 __u16 handle;
2775
2776 skb_pull(skb, HCI_SCO_HDR_SIZE);
2777
2778 handle = __le16_to_cpu(hdr->handle);
2779
2780 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2781
2782 hdev->stat.sco_rx++;
2783
2784 hci_dev_lock(hdev);
2785 conn = hci_conn_hash_lookup_handle(hdev, handle);
2786 hci_dev_unlock(hdev);
2787
2788 if (conn) {
2789 /* Send to upper protocol */
2790 sco_recv_scodata(conn, skb);
2791 return;
2792 } else {
2793 BT_ERR("%s SCO packet for unknown connection handle %d",
2794 hdev->name, handle);
2795 }
2796
2797 kfree_skb(skb);
2798 }
2799
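/*
 * RX work item: every frame from the driver is first copied to the monitor
 * socket and, in promiscuous mode, to the HCI sockets. In raw mode no
 * further processing happens; during initialization data packets are dropped
 * while events are still handled; everything else is dispatched by packet
 * type to the event, ACL or SCO handlers.
 */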
2800 static void hci_rx_work(struct work_struct *work)
2801 {
2802 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2803 struct sk_buff *skb;
2804
2805 BT_DBG("%s", hdev->name);
2806
2807 while ((skb = skb_dequeue(&hdev->rx_q))) {
2808 /* Send copy to monitor */
2809 hci_send_to_monitor(hdev, skb);
2810
2811 if (atomic_read(&hdev->promisc)) {
2812 /* Send copy to the sockets */
2813 hci_send_to_sock(hdev, skb);
2814 }
2815
2816 if (test_bit(HCI_RAW, &hdev->flags)) {
2817 kfree_skb(skb);
2818 continue;
2819 }
2820
2821 if (test_bit(HCI_INIT, &hdev->flags)) {
2822 /* Don't process data packets in these states. */
2823 switch (bt_cb(skb)->pkt_type) {
2824 case HCI_ACLDATA_PKT:
2825 case HCI_SCODATA_PKT:
2826 kfree_skb(skb);
2827 continue;
2828 }
2829 }
2830
2831 /* Process frame */
2832 switch (bt_cb(skb)->pkt_type) {
2833 case HCI_EVENT_PKT:
2834 BT_DBG("%s Event packet", hdev->name);
2835 hci_event_packet(hdev, skb);
2836 break;
2837
2838 case HCI_ACLDATA_PKT:
2839 BT_DBG("%s ACL data packet", hdev->name);
2840 hci_acldata_packet(hdev, skb);
2841 break;
2842
2843 case HCI_SCODATA_PKT:
2844 BT_DBG("%s SCO data packet", hdev->name);
2845 hci_scodata_packet(hdev, skb);
2846 break;
2847
2848 default:
2849 kfree_skb(skb);
2850 break;
2851 }
2852 }
2853 }
2854
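/*
 * Command work item: HCI allows only a limited number of outstanding
 * commands, tracked in cmd_cnt. Each sent command is cloned into sent_cmd so
 * its parameters can be retrieved when the completion event arrives, and the
 * command timer is (re)armed as a watchdog unless a reset is in flight. If
 * the clone fails, the command is requeued and the work rescheduled.
 */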
2855 static void hci_cmd_work(struct work_struct *work)
2856 {
2857 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2858 struct sk_buff *skb;
2859
2860 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2861
2862 /* Send queued commands */
2863 if (atomic_read(&hdev->cmd_cnt)) {
2864 skb = skb_dequeue(&hdev->cmd_q);
2865 if (!skb)
2866 return;
2867
2868 kfree_skb(hdev->sent_cmd);
2869
2870 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2871 if (hdev->sent_cmd) {
2872 atomic_dec(&hdev->cmd_cnt);
2873 hci_send_frame(skb);
2874 if (test_bit(HCI_RESET, &hdev->flags))
2875 del_timer(&hdev->cmd_timer);
2876 else
2877 mod_timer(&hdev->cmd_timer,
2878 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2879 } else {
2880 skb_queue_head(&hdev->cmd_q, skb);
2881 queue_work(hdev->workqueue, &hdev->cmd_work);
2882 }
2883 }
2884 }
2885
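/*
 * Discovery helpers (used e.g. by the management interface).
 * hci_do_inquiry() issues an HCI Inquiry with the General Inquiry Access
 * Code (GIAC, 0x9e8b33) unless one is already running; length is the inquiry
 * duration in units of 1.28 seconds. The inquiry cache is flushed first so
 * stale results do not leak into the new discovery session.
 */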
2886 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2887 {
2888 /* General inquiry access code (GIAC) */
2889 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2890 struct hci_cp_inquiry cp;
2891
2892 BT_DBG("%s", hdev->name);
2893
2894 if (test_bit(HCI_INQUIRY, &hdev->flags))
2895 return -EINPROGRESS;
2896
2897 inquiry_cache_flush(hdev);
2898
2899 memset(&cp, 0, sizeof(cp));
2900 memcpy(&cp.lap, lap, sizeof(cp.lap));
2901 cp.length = length;
2902
2903 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2904 }
2905
2906 int hci_cancel_inquiry(struct hci_dev *hdev)
2907 {
2908 BT_DBG("%s", hdev->name);
2909
2910 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2911 return -EPERM;
2912
2913 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2914 }