Bluetooth: Remove HCI notifier handling
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000	/* ms */
56
57 bool enable_hs;
58
59 static void hci_rx_work(struct work_struct *work);
60 static void hci_cmd_work(struct work_struct *work);
61 static void hci_tx_work(struct work_struct *work);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* ---- HCI notifications ---- */
72
73 static void hci_notify(struct hci_dev *hdev, int event)
74 {
75 hci_sock_dev_event(hdev, event);
76 }
77
78 /* ---- HCI requests ---- */
79
80 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
81 {
82 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
83
84 /* If this is the init phase, check whether the completed command matches
85 * the last init command, and if not just return.
86 */
87 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
88 return;
89
90 if (hdev->req_status == HCI_REQ_PEND) {
91 hdev->req_result = result;
92 hdev->req_status = HCI_REQ_DONE;
93 wake_up_interruptible(&hdev->req_wait_q);
94 }
95 }
96
97 static void hci_req_cancel(struct hci_dev *hdev, int err)
98 {
99 BT_DBG("%s err 0x%2.2x", hdev->name, err);
100
101 if (hdev->req_status == HCI_REQ_PEND) {
102 hdev->req_result = err;
103 hdev->req_status = HCI_REQ_CANCELED;
104 wake_up_interruptible(&hdev->req_wait_q);
105 }
106 }
107
108 /* Execute request and wait for completion. */
109 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
110 unsigned long opt, __u32 timeout)
111 {
112 DECLARE_WAITQUEUE(wait, current);
113 int err = 0;
114
115 BT_DBG("%s start", hdev->name);
116
117 hdev->req_status = HCI_REQ_PEND;
118
119 add_wait_queue(&hdev->req_wait_q, &wait);
120 set_current_state(TASK_INTERRUPTIBLE);
121
122 req(hdev, opt);
123 schedule_timeout(timeout);
124
125 remove_wait_queue(&hdev->req_wait_q, &wait);
126
127 if (signal_pending(current))
128 return -EINTR;
129
130 switch (hdev->req_status) {
131 case HCI_REQ_DONE:
132 err = -bt_to_errno(hdev->req_result);
133 break;
134
135 case HCI_REQ_CANCELED:
136 err = -hdev->req_result;
137 break;
138
139 default:
140 err = -ETIMEDOUT;
141 break;
142 }
143
144 hdev->req_status = hdev->req_result = 0;
145
146 BT_DBG("%s end: err %d", hdev->name, err);
147
148 return err;
149 }
150
151 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
152 unsigned long opt, __u32 timeout)
153 {
154 int ret;
155
156 if (!test_bit(HCI_UP, &hdev->flags))
157 return -ENETDOWN;
158
159 /* Serialize all requests */
160 hci_req_lock(hdev);
161 ret = __hci_request(hdev, req, opt, timeout);
162 hci_req_unlock(hdev);
163
164 return ret;
165 }
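/* Illustrative sketch (not part of the original file): how a caller is
 * expected to drive the serialized request machinery above. The callback
 * name and the vendor opcode 0xfc01 are hypothetical.
 *
 *	static void example_vendor_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 param = opt;
 *
 *		hci_send_cmd(hdev, 0xfc01, 1, &param);
 *	}
 *
 *	err = hci_request(hdev, example_vendor_req, 1,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_request() blocks until hci_req_complete() is called from event
 * processing, the request is cancelled, or the timeout expires.
 */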
166
167 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
168 {
169 BT_DBG("%s %ld", hdev->name, opt);
170
171 /* Reset device */
172 set_bit(HCI_RESET, &hdev->flags);
173 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
174 }
175
176 static void bredr_init(struct hci_dev *hdev)
177 {
178 struct hci_cp_delete_stored_link_key cp;
179 __le16 param;
180 __u8 flt_type;
181
182 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
183
184 /* Mandatory initialization */
185
186 /* Reset */
187 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
188 set_bit(HCI_RESET, &hdev->flags);
189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 }
191
192 /* Read Local Supported Features */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
194
195 /* Read Local Version */
196 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
197
198 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
199 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
200
201 /* Read BD Address */
202 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
203
204 /* Read Class of Device */
205 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
206
207 /* Read Local Name */
208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
209
210 /* Read Voice Setting */
211 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
212
213 /* Optional initialization */
214
215 /* Clear Event Filters */
216 flt_type = HCI_FLT_CLEAR_ALL;
217 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
218
219 /* Connection accept timeout ~20 secs */
220 param = cpu_to_le16(0x7d00);
221 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
222
223 bacpy(&cp.bdaddr, BDADDR_ANY);
224 cp.delete_all = 1;
225 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
226 }
227
228 static void amp_init(struct hci_dev *hdev)
229 {
230 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
231
232 /* Reset */
233 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
234
235 /* Read Local Version */
236 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
237 }
238
239 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240 {
241 struct sk_buff *skb;
242
243 BT_DBG("%s %ld", hdev->name, opt);
244
245 /* Driver initialization */
246
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
251
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
254 }
255 skb_queue_purge(&hdev->driver_init);
256
257 switch (hdev->dev_type) {
258 case HCI_BREDR:
259 bredr_init(hdev);
260 break;
261
262 case HCI_AMP:
263 amp_init(hdev);
264 break;
265
266 default:
267 BT_ERR("Unknown device type %d", hdev->dev_type);
268 break;
269 }
270
271 }
272
273 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
274 {
275 BT_DBG("%s", hdev->name);
276
277 /* Read LE buffer size */
278 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
279 }
280
281 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282 {
283 __u8 scan = opt;
284
285 BT_DBG("%s %x", hdev->name, scan);
286
287 /* Inquiry and Page scans */
288 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
289 }
290
291 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292 {
293 __u8 auth = opt;
294
295 BT_DBG("%s %x", hdev->name, auth);
296
297 /* Authentication */
298 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
299 }
300
301 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302 {
303 __u8 encrypt = opt;
304
305 BT_DBG("%s %x", hdev->name, encrypt);
306
307 /* Encryption */
308 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
309 }
310
311 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
312 {
313 __le16 policy = cpu_to_le16(opt);
314
315 BT_DBG("%s %x", hdev->name, policy);
316
317 /* Default link policy */
318 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
319 }
320
321 /* Get HCI device by index.
322 * Device is held on return. */
323 struct hci_dev *hci_dev_get(int index)
324 {
325 struct hci_dev *hdev = NULL, *d;
326
327 BT_DBG("%d", index);
328
329 if (index < 0)
330 return NULL;
331
332 read_lock(&hci_dev_list_lock);
333 list_for_each_entry(d, &hci_dev_list, list) {
334 if (d->id == index) {
335 hdev = hci_dev_hold(d);
336 break;
337 }
338 }
339 read_unlock(&hci_dev_list_lock);
340 return hdev;
341 }
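/* Usage sketch: hci_dev_get() returns the device with a reference held,
 * so every successful lookup must be paired with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */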
342
343 /* ---- Inquiry support ---- */
344
345 bool hci_discovery_active(struct hci_dev *hdev)
346 {
347 struct discovery_state *discov = &hdev->discovery;
348
349 switch (discov->state) {
350 case DISCOVERY_FINDING:
351 case DISCOVERY_RESOLVING:
352 return true;
353
354 default:
355 return false;
356 }
357 }
358
359 void hci_discovery_set_state(struct hci_dev *hdev, int state)
360 {
361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
362
363 if (hdev->discovery.state == state)
364 return;
365
366 switch (state) {
367 case DISCOVERY_STOPPED:
368 hdev->discovery.type = 0;
369
370 if (hdev->discovery.state != DISCOVERY_STARTING)
371 mgmt_discovering(hdev, 0);
372 break;
373 case DISCOVERY_STARTING:
374 break;
375 case DISCOVERY_FINDING:
376 mgmt_discovering(hdev, 1);
377 break;
378 case DISCOVERY_RESOLVING:
379 break;
380 case DISCOVERY_STOPPING:
381 break;
382 }
383
384 hdev->discovery.state = state;
385 }
386
387 static void inquiry_cache_flush(struct hci_dev *hdev)
388 {
389 struct discovery_state *cache = &hdev->discovery;
390 struct inquiry_entry *p, *n;
391
392 list_for_each_entry_safe(p, n, &cache->all, all) {
393 list_del(&p->all);
394 kfree(p);
395 }
396
397 INIT_LIST_HEAD(&cache->unknown);
398 INIT_LIST_HEAD(&cache->resolve);
399 cache->state = DISCOVERY_STOPPED;
400 }
401
402 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
403 {
404 struct discovery_state *cache = &hdev->discovery;
405 struct inquiry_entry *e;
406
407 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
408
409 list_for_each_entry(e, &cache->all, all) {
410 if (!bacmp(&e->data.bdaddr, bdaddr))
411 return e;
412 }
413
414 return NULL;
415 }
416
417 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
418 bdaddr_t *bdaddr)
419 {
420 struct discovery_state *cache = &hdev->discovery;
421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
425 list_for_each_entry(e, &cache->unknown, list) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431 }
432
433 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
434 bdaddr_t *bdaddr,
435 int state)
436 {
437 struct discovery_state *cache = &hdev->discovery;
438 struct inquiry_entry *e;
439
440 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
441
442 list_for_each_entry(e, &cache->resolve, list) {
443 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
444 return e;
445 if (!bacmp(&e->data.bdaddr, bdaddr))
446 return e;
447 }
448
449 return NULL;
450 }
451
452 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
453 struct inquiry_entry *ie)
454 {
455 struct discovery_state *cache = &hdev->discovery;
456 struct list_head *pos = &cache->resolve;
457 struct inquiry_entry *p;
458
459 list_del(&ie->list);
460
461 list_for_each_entry(p, &cache->resolve, list) {
462 if (p->name_state != NAME_PENDING &&
463 abs(p->data.rssi) >= abs(ie->data.rssi))
464 break;
465 pos = &p->list;
466 }
467
468 list_add(&ie->list, pos);
469 }
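/* Worked example: the resolve list is kept ordered by ascending
 * abs(RSSI), i.e. strongest signal first, skipping over entries whose
 * resolution is already NAME_PENDING. With queued RSSIs of -40, -60
 * and -90, an entry updated to RSSI -55 is re-inserted between -40 and
 * -60, so the strongest devices get their names resolved first.
 */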
470
471 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
472 bool name_known)
473 {
474 struct discovery_state *cache = &hdev->discovery;
475 struct inquiry_entry *ie;
476
477 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
478
479 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
480 if (ie) {
481 if (ie->name_state == NAME_NEEDED &&
482 data->rssi != ie->data.rssi) {
483 ie->data.rssi = data->rssi;
484 hci_inquiry_cache_update_resolve(hdev, ie);
485 }
486
487 goto update;
488 }
489
490 /* Entry not in the cache. Add new one. */
491 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
492 if (!ie)
493 return false;
494
495 list_add(&ie->all, &cache->all);
496
497 if (name_known) {
498 ie->name_state = NAME_KNOWN;
499 } else {
500 ie->name_state = NAME_NOT_KNOWN;
501 list_add(&ie->list, &cache->unknown);
502 }
503
504 update:
505 if (name_known && ie->name_state != NAME_KNOWN &&
506 ie->name_state != NAME_PENDING) {
507 ie->name_state = NAME_KNOWN;
508 list_del(&ie->list);
509 }
510
511 memcpy(&ie->data, data, sizeof(*data));
512 ie->timestamp = jiffies;
513 cache->timestamp = jiffies;
514
515 if (ie->name_state == NAME_NOT_KNOWN)
516 return false;
517
518 return true;
519 }
520
521 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
522 {
523 struct discovery_state *cache = &hdev->discovery;
524 struct inquiry_info *info = (struct inquiry_info *) buf;
525 struct inquiry_entry *e;
526 int copied = 0;
527
528 list_for_each_entry(e, &cache->all, all) {
529 struct inquiry_data *data = &e->data;
530
531 if (copied >= num)
532 break;
533
534 bacpy(&info->bdaddr, &data->bdaddr);
535 info->pscan_rep_mode = data->pscan_rep_mode;
536 info->pscan_period_mode = data->pscan_period_mode;
537 info->pscan_mode = data->pscan_mode;
538 memcpy(info->dev_class, data->dev_class, 3);
539 info->clock_offset = data->clock_offset;
540
541 info++;
542 copied++;
543 }
544
545 BT_DBG("cache %p, copied %d", cache, copied);
546 return copied;
547 }
548
549 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
550 {
551 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
552 struct hci_cp_inquiry cp;
553
554 BT_DBG("%s", hdev->name);
555
556 if (test_bit(HCI_INQUIRY, &hdev->flags))
557 return;
558
559 /* Start Inquiry */
560 memcpy(&cp.lap, &ir->lap, 3);
561 cp.length = ir->length;
562 cp.num_rsp = ir->num_rsp;
563 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
564 }
565
566 int hci_inquiry(void __user *arg)
567 {
568 __u8 __user *ptr = arg;
569 struct hci_inquiry_req ir;
570 struct hci_dev *hdev;
571 int err = 0, do_inquiry = 0, max_rsp;
572 long timeo;
573 __u8 *buf;
574
575 if (copy_from_user(&ir, ptr, sizeof(ir)))
576 return -EFAULT;
577
578 hdev = hci_dev_get(ir.dev_id);
579 if (!hdev)
580 return -ENODEV;
581
582 hci_dev_lock(hdev);
583 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
584 inquiry_cache_empty(hdev) ||
585 ir.flags & IREQ_CACHE_FLUSH) {
586 inquiry_cache_flush(hdev);
587 do_inquiry = 1;
588 }
589 hci_dev_unlock(hdev);
590
591 timeo = ir.length * msecs_to_jiffies(2000);
592
593 if (do_inquiry) {
594 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
595 if (err < 0)
596 goto done;
597 }
598
599 /* For an unlimited number of responses we use a buffer with 255 entries */
600 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
601
602 /* cache_dump can't sleep, so allocate a temporary buffer here and
603 * copy it to user space afterwards.
604 */
605 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
606 if (!buf) {
607 err = -ENOMEM;
608 goto done;
609 }
610
611 hci_dev_lock(hdev);
612 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
613 hci_dev_unlock(hdev);
614
615 BT_DBG("num_rsp %d", ir.num_rsp);
616
617 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
618 ptr += sizeof(ir);
619 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
620 ir.num_rsp))
621 err = -EFAULT;
622 } else
623 err = -EFAULT;
624
625 kfree(buf);
626
627 done:
628 hci_dev_put(hdev);
629 return err;
630 }
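/* Userspace sketch (illustrative, error handling omitted): this handler
 * backs the HCIINQUIRY ioctl on a raw HCI socket. The caller passes a
 * struct hci_inquiry_req immediately followed by room for num_rsp
 * struct inquiry_info entries:
 *
 *	dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	buf = malloc(sizeof(struct hci_inquiry_req) +
 *		     255 * sizeof(struct inquiry_info));
 *	ir = (struct hci_inquiry_req *) buf;
 *	ir->dev_id  = 0;
 *	ir->num_rsp = 255;
 *	ir->length  = 8;	inquiry length (the code above waits 2 s per unit)
 *	ir->lap[0]  = 0x33;	general inquiry access code 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ioctl(dd, HCIINQUIRY, (unsigned long) buf);
 */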
631
632 /* ---- HCI ioctl helpers ---- */
633
634 int hci_dev_open(__u16 dev)
635 {
636 struct hci_dev *hdev;
637 int ret = 0;
638
639 hdev = hci_dev_get(dev);
640 if (!hdev)
641 return -ENODEV;
642
643 BT_DBG("%s %p", hdev->name, hdev);
644
645 hci_req_lock(hdev);
646
647 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
648 ret = -ERFKILL;
649 goto done;
650 }
651
652 if (test_bit(HCI_UP, &hdev->flags)) {
653 ret = -EALREADY;
654 goto done;
655 }
656
657 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
658 set_bit(HCI_RAW, &hdev->flags);
659
660 /* Treat all non-BR/EDR controllers as raw devices if
661 * enable_hs is not set */
662 if (hdev->dev_type != HCI_BREDR && !enable_hs)
663 set_bit(HCI_RAW, &hdev->flags);
664
665 if (hdev->open(hdev)) {
666 ret = -EIO;
667 goto done;
668 }
669
670 if (!test_bit(HCI_RAW, &hdev->flags)) {
671 atomic_set(&hdev->cmd_cnt, 1);
672 set_bit(HCI_INIT, &hdev->flags);
673 hdev->init_last_cmd = 0;
674
675 ret = __hci_request(hdev, hci_init_req, 0,
676 msecs_to_jiffies(HCI_INIT_TIMEOUT));
677
678 if (lmp_host_le_capable(hdev))
679 ret = __hci_request(hdev, hci_le_init_req, 0,
680 msecs_to_jiffies(HCI_INIT_TIMEOUT));
681
682 clear_bit(HCI_INIT, &hdev->flags);
683 }
684
685 if (!ret) {
686 hci_dev_hold(hdev);
687 set_bit(HCI_UP, &hdev->flags);
688 hci_notify(hdev, HCI_DEV_UP);
689 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
690 hci_dev_lock(hdev);
691 mgmt_powered(hdev, 1);
692 hci_dev_unlock(hdev);
693 }
694 } else {
695 /* Init failed, cleanup */
696 flush_work(&hdev->tx_work);
697 flush_work(&hdev->cmd_work);
698 flush_work(&hdev->rx_work);
699
700 skb_queue_purge(&hdev->cmd_q);
701 skb_queue_purge(&hdev->rx_q);
702
703 if (hdev->flush)
704 hdev->flush(hdev);
705
706 if (hdev->sent_cmd) {
707 kfree_skb(hdev->sent_cmd);
708 hdev->sent_cmd = NULL;
709 }
710
711 hdev->close(hdev);
712 hdev->flags = 0;
713 }
714
715 done:
716 hci_req_unlock(hdev);
717 hci_dev_put(hdev);
718 return ret;
719 }
720
721 static int hci_dev_do_close(struct hci_dev *hdev)
722 {
723 BT_DBG("%s %p", hdev->name, hdev);
724
725 cancel_work_sync(&hdev->le_scan);
726
727 hci_req_cancel(hdev, ENODEV);
728 hci_req_lock(hdev);
729
730 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
731 del_timer_sync(&hdev->cmd_timer);
732 hci_req_unlock(hdev);
733 return 0;
734 }
735
736 /* Flush RX and TX works */
737 flush_work(&hdev->tx_work);
738 flush_work(&hdev->rx_work);
739
740 if (hdev->discov_timeout > 0) {
741 cancel_delayed_work(&hdev->discov_off);
742 hdev->discov_timeout = 0;
743 }
744
745 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
746 cancel_delayed_work(&hdev->power_off);
747
748 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
749 cancel_delayed_work(&hdev->service_cache);
750
751 cancel_delayed_work_sync(&hdev->le_scan_disable);
752
753 hci_dev_lock(hdev);
754 inquiry_cache_flush(hdev);
755 hci_conn_hash_flush(hdev);
756 hci_dev_unlock(hdev);
757
758 hci_notify(hdev, HCI_DEV_DOWN);
759
760 if (hdev->flush)
761 hdev->flush(hdev);
762
763 /* Reset device */
764 skb_queue_purge(&hdev->cmd_q);
765 atomic_set(&hdev->cmd_cnt, 1);
766 if (!test_bit(HCI_RAW, &hdev->flags) &&
767 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
768 set_bit(HCI_INIT, &hdev->flags);
769 __hci_request(hdev, hci_reset_req, 0,
770 msecs_to_jiffies(250));
771 clear_bit(HCI_INIT, &hdev->flags);
772 }
773
774 /* flush cmd work */
775 flush_work(&hdev->cmd_work);
776
777 /* Drop queues */
778 skb_queue_purge(&hdev->rx_q);
779 skb_queue_purge(&hdev->cmd_q);
780 skb_queue_purge(&hdev->raw_q);
781
782 /* Drop last sent command */
783 if (hdev->sent_cmd) {
784 del_timer_sync(&hdev->cmd_timer);
785 kfree_skb(hdev->sent_cmd);
786 hdev->sent_cmd = NULL;
787 }
788
789 /* After this point our queues are empty
790 * and no tasks are scheduled. */
791 hdev->close(hdev);
792
793 hci_dev_lock(hdev);
794 mgmt_powered(hdev, 0);
795 hci_dev_unlock(hdev);
796
797 /* Clear flags */
798 hdev->flags = 0;
799
800 hci_req_unlock(hdev);
801
802 hci_dev_put(hdev);
803 return 0;
804 }
805
806 int hci_dev_close(__u16 dev)
807 {
808 struct hci_dev *hdev;
809 int err;
810
811 hdev = hci_dev_get(dev);
812 if (!hdev)
813 return -ENODEV;
814 err = hci_dev_do_close(hdev);
815 hci_dev_put(hdev);
816 return err;
817 }
818
819 int hci_dev_reset(__u16 dev)
820 {
821 struct hci_dev *hdev;
822 int ret = 0;
823
824 hdev = hci_dev_get(dev);
825 if (!hdev)
826 return -ENODEV;
827
828 hci_req_lock(hdev);
829
830 if (!test_bit(HCI_UP, &hdev->flags))
831 goto done;
832
833 /* Drop queues */
834 skb_queue_purge(&hdev->rx_q);
835 skb_queue_purge(&hdev->cmd_q);
836
837 hci_dev_lock(hdev);
838 inquiry_cache_flush(hdev);
839 hci_conn_hash_flush(hdev);
840 hci_dev_unlock(hdev);
841
842 if (hdev->flush)
843 hdev->flush(hdev);
844
845 atomic_set(&hdev->cmd_cnt, 1);
846 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
847
848 if (!test_bit(HCI_RAW, &hdev->flags))
849 ret = __hci_request(hdev, hci_reset_req, 0,
850 msecs_to_jiffies(HCI_INIT_TIMEOUT));
851
852 done:
853 hci_req_unlock(hdev);
854 hci_dev_put(hdev);
855 return ret;
856 }
857
858 int hci_dev_reset_stat(__u16 dev)
859 {
860 struct hci_dev *hdev;
861 int ret = 0;
862
863 hdev = hci_dev_get(dev);
864 if (!hdev)
865 return -ENODEV;
866
867 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
868
869 hci_dev_put(hdev);
870
871 return ret;
872 }
873
874 int hci_dev_cmd(unsigned int cmd, void __user *arg)
875 {
876 struct hci_dev *hdev;
877 struct hci_dev_req dr;
878 int err = 0;
879
880 if (copy_from_user(&dr, arg, sizeof(dr)))
881 return -EFAULT;
882
883 hdev = hci_dev_get(dr.dev_id);
884 if (!hdev)
885 return -ENODEV;
886
887 switch (cmd) {
888 case HCISETAUTH:
889 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
890 msecs_to_jiffies(HCI_INIT_TIMEOUT));
891 break;
892
893 case HCISETENCRYPT:
894 if (!lmp_encrypt_capable(hdev)) {
895 err = -EOPNOTSUPP;
896 break;
897 }
898
899 if (!test_bit(HCI_AUTH, &hdev->flags)) {
900 /* Auth must be enabled first */
901 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
902 msecs_to_jiffies(HCI_INIT_TIMEOUT));
903 if (err)
904 break;
905 }
906
907 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
908 msecs_to_jiffies(HCI_INIT_TIMEOUT));
909 break;
910
911 case HCISETSCAN:
912 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
913 msecs_to_jiffies(HCI_INIT_TIMEOUT));
914 break;
915
916 case HCISETLINKPOL:
917 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
918 msecs_to_jiffies(HCI_INIT_TIMEOUT));
919 break;
920
921 case HCISETLINKMODE:
922 hdev->link_mode = ((__u16) dr.dev_opt) &
923 (HCI_LM_MASTER | HCI_LM_ACCEPT);
924 break;
925
926 case HCISETPTYPE:
927 hdev->pkt_type = (__u16) dr.dev_opt;
928 break;
929
930 case HCISETACLMTU:
931 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
932 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
933 break;
934
935 case HCISETSCOMTU:
936 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
937 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
938 break;
939
940 default:
941 err = -EINVAL;
942 break;
943 }
944
945 hci_dev_put(hdev);
946 return err;
947 }
948
949 int hci_get_dev_list(void __user *arg)
950 {
951 struct hci_dev *hdev;
952 struct hci_dev_list_req *dl;
953 struct hci_dev_req *dr;
954 int n = 0, size, err;
955 __u16 dev_num;
956
957 if (get_user(dev_num, (__u16 __user *) arg))
958 return -EFAULT;
959
960 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
961 return -EINVAL;
962
963 size = sizeof(*dl) + dev_num * sizeof(*dr);
964
965 dl = kzalloc(size, GFP_KERNEL);
966 if (!dl)
967 return -ENOMEM;
968
969 dr = dl->dev_req;
970
971 read_lock(&hci_dev_list_lock);
972 list_for_each_entry(hdev, &hci_dev_list, list) {
973 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
974 cancel_delayed_work(&hdev->power_off);
975
976 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
977 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
978
979 (dr + n)->dev_id = hdev->id;
980 (dr + n)->dev_opt = hdev->flags;
981
982 if (++n >= dev_num)
983 break;
984 }
985 read_unlock(&hci_dev_list_lock);
986
987 dl->dev_num = n;
988 size = sizeof(*dl) + n * sizeof(*dr);
989
990 err = copy_to_user(arg, dl, size);
991 kfree(dl);
992
993 return err ? -EFAULT : 0;
994 }
995
996 int hci_get_dev_info(void __user *arg)
997 {
998 struct hci_dev *hdev;
999 struct hci_dev_info di;
1000 int err = 0;
1001
1002 if (copy_from_user(&di, arg, sizeof(di)))
1003 return -EFAULT;
1004
1005 hdev = hci_dev_get(di.dev_id);
1006 if (!hdev)
1007 return -ENODEV;
1008
1009 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1010 cancel_delayed_work_sync(&hdev->power_off);
1011
1012 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1013 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1014
1015 strcpy(di.name, hdev->name);
1016 di.bdaddr = hdev->bdaddr;
1017 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1018 di.flags = hdev->flags;
1019 di.pkt_type = hdev->pkt_type;
1020 di.acl_mtu = hdev->acl_mtu;
1021 di.acl_pkts = hdev->acl_pkts;
1022 di.sco_mtu = hdev->sco_mtu;
1023 di.sco_pkts = hdev->sco_pkts;
1024 di.link_policy = hdev->link_policy;
1025 di.link_mode = hdev->link_mode;
1026
1027 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1028 memcpy(&di.features, &hdev->features, sizeof(di.features));
1029
1030 if (copy_to_user(arg, &di, sizeof(di)))
1031 err = -EFAULT;
1032
1033 hci_dev_put(hdev);
1034
1035 return err;
1036 }
1037
1038 /* ---- Interface to HCI drivers ---- */
1039
1040 static int hci_rfkill_set_block(void *data, bool blocked)
1041 {
1042 struct hci_dev *hdev = data;
1043
1044 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1045
1046 if (!blocked)
1047 return 0;
1048
1049 hci_dev_do_close(hdev);
1050
1051 return 0;
1052 }
1053
1054 static const struct rfkill_ops hci_rfkill_ops = {
1055 .set_block = hci_rfkill_set_block,
1056 };
1057
1058 /* Alloc HCI device */
1059 struct hci_dev *hci_alloc_dev(void)
1060 {
1061 struct hci_dev *hdev;
1062
1063 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1064 if (!hdev)
1065 return NULL;
1066
1067 hci_init_sysfs(hdev);
1068 skb_queue_head_init(&hdev->driver_init);
1069
1070 return hdev;
1071 }
1072 EXPORT_SYMBOL(hci_alloc_dev);
1073
1074 /* Free HCI device */
1075 void hci_free_dev(struct hci_dev *hdev)
1076 {
1077 skb_queue_purge(&hdev->driver_init);
1078
1079 /* will free via device release */
1080 put_device(&hdev->dev);
1081 }
1082 EXPORT_SYMBOL(hci_free_dev);
1083
1084 static void hci_power_on(struct work_struct *work)
1085 {
1086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1087
1088 BT_DBG("%s", hdev->name);
1089
1090 if (hci_dev_open(hdev->id) < 0)
1091 return;
1092
1093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1094 schedule_delayed_work(&hdev->power_off,
1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1096
1097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1098 mgmt_index_added(hdev);
1099 }
1100
1101 static void hci_power_off(struct work_struct *work)
1102 {
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 power_off.work);
1105
1106 BT_DBG("%s", hdev->name);
1107
1108 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1109
1110 hci_dev_close(hdev->id);
1111 }
1112
1113 static void hci_discov_off(struct work_struct *work)
1114 {
1115 struct hci_dev *hdev;
1116 u8 scan = SCAN_PAGE;
1117
1118 hdev = container_of(work, struct hci_dev, discov_off.work);
1119
1120 BT_DBG("%s", hdev->name);
1121
1122 hci_dev_lock(hdev);
1123
1124 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1125
1126 hdev->discov_timeout = 0;
1127
1128 hci_dev_unlock(hdev);
1129 }
1130
1131 int hci_uuids_clear(struct hci_dev *hdev)
1132 {
1133 struct list_head *p, *n;
1134
1135 list_for_each_safe(p, n, &hdev->uuids) {
1136 struct bt_uuid *uuid;
1137
1138 uuid = list_entry(p, struct bt_uuid, list);
1139
1140 list_del(p);
1141 kfree(uuid);
1142 }
1143
1144 return 0;
1145 }
1146
1147 int hci_link_keys_clear(struct hci_dev *hdev)
1148 {
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->link_keys) {
1152 struct link_key *key;
1153
1154 key = list_entry(p, struct link_key, list);
1155
1156 list_del(p);
1157 kfree(key);
1158 }
1159
1160 return 0;
1161 }
1162
1163 int hci_smp_ltks_clear(struct hci_dev *hdev)
1164 {
1165 struct smp_ltk *k, *tmp;
1166
1167 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1168 list_del(&k->list);
1169 kfree(k);
1170 }
1171
1172 return 0;
1173 }
1174
1175 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1176 {
1177 struct link_key *k;
1178
1179 list_for_each_entry(k, &hdev->link_keys, list)
1180 if (bacmp(bdaddr, &k->bdaddr) == 0)
1181 return k;
1182
1183 return NULL;
1184 }
1185
1186 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1187 u8 key_type, u8 old_key_type)
1188 {
1189 /* Legacy key */
1190 if (key_type < 0x03)
1191 return 1;
1192
1193 /* Debug keys are insecure so don't store them persistently */
1194 if (key_type == HCI_LK_DEBUG_COMBINATION)
1195 return 0;
1196
1197 /* Changed combination key and there's no previous one */
1198 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1199 return 0;
1200
1201 /* Security mode 3 case */
1202 if (!conn)
1203 return 1;
1204
1205 /* Neither local nor remote side had no-bonding as requirement */
1206 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1207 return 1;
1208
1209 /* Local side had dedicated bonding as requirement */
1210 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1211 return 1;
1212
1213 /* Remote side had dedicated bonding as requirement */
1214 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1215 return 1;
1216
1217 /* If none of the above criteria match, then don't store the key
1218 * persistently */
1219 return 0;
1220 }
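/* Summary of the decision above (link-key type values as defined for
 * the HCI_LK_* constants):
 *
 *	type < 0x03 (legacy combination/local/remote unit)   -> store
 *	type == 0x03 (debug combination)                     -> never store
 *	type == 0x06 (changed combination) with no previous
 *	             key (old_key_type == 0xff)              -> don't store
 *	no hci_conn available (security mode 3)              -> store
 *	both sides requested bonding (auth > 0x01)           -> store
 *	either side used dedicated bonding (0x02 or 0x03)    -> store
 *	anything else                                        -> don't store
 */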
1221
1222 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1223 {
1224 struct smp_ltk *k;
1225
1226 list_for_each_entry(k, &hdev->long_term_keys, list) {
1227 if (k->ediv != ediv ||
1228 memcmp(rand, k->rand, sizeof(k->rand)))
1229 continue;
1230
1231 return k;
1232 }
1233
1234 return NULL;
1235 }
1236 EXPORT_SYMBOL(hci_find_ltk);
1237
1238 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1239 u8 addr_type)
1240 {
1241 struct smp_ltk *k;
1242
1243 list_for_each_entry(k, &hdev->long_term_keys, list)
1244 if (addr_type == k->bdaddr_type &&
1245 bacmp(bdaddr, &k->bdaddr) == 0)
1246 return k;
1247
1248 return NULL;
1249 }
1250 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1251
1252 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1253 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1254 {
1255 struct link_key *key, *old_key;
1256 u8 old_key_type, persistent;
1257
1258 old_key = hci_find_link_key(hdev, bdaddr);
1259 if (old_key) {
1260 old_key_type = old_key->type;
1261 key = old_key;
1262 } else {
1263 old_key_type = conn ? conn->key_type : 0xff;
1264 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1265 if (!key)
1266 return -ENOMEM;
1267 list_add(&key->list, &hdev->link_keys);
1268 }
1269
1270 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1271
1272 /* Some buggy controller combinations generate a changed
1273 * combination key for legacy pairing even when there's no
1274 * previous key */
1275 if (type == HCI_LK_CHANGED_COMBINATION &&
1276 (!conn || conn->remote_auth == 0xff) &&
1277 old_key_type == 0xff) {
1278 type = HCI_LK_COMBINATION;
1279 if (conn)
1280 conn->key_type = type;
1281 }
1282
1283 bacpy(&key->bdaddr, bdaddr);
1284 memcpy(key->val, val, 16);
1285 key->pin_len = pin_len;
1286
1287 if (type == HCI_LK_CHANGED_COMBINATION)
1288 key->type = old_key_type;
1289 else
1290 key->type = type;
1291
1292 if (!new_key)
1293 return 0;
1294
1295 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1296
1297 mgmt_new_link_key(hdev, key, persistent);
1298
1299 if (!persistent) {
1300 list_del(&key->list);
1301 kfree(key);
1302 }
1303
1304 return 0;
1305 }
1306
1307 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1308 int new_key, u8 authenticated, u8 tk[16],
1309 u8 enc_size, u16 ediv, u8 rand[8])
1310 {
1311 struct smp_ltk *key, *old_key;
1312
1313 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1314 return 0;
1315
1316 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1317 if (old_key)
1318 key = old_key;
1319 else {
1320 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1321 if (!key)
1322 return -ENOMEM;
1323 list_add(&key->list, &hdev->long_term_keys);
1324 }
1325
1326 bacpy(&key->bdaddr, bdaddr);
1327 key->bdaddr_type = addr_type;
1328 memcpy(key->val, tk, sizeof(key->val));
1329 key->authenticated = authenticated;
1330 key->ediv = ediv;
1331 key->enc_size = enc_size;
1332 key->type = type;
1333 memcpy(key->rand, rand, sizeof(key->rand));
1334
1335 if (!new_key)
1336 return 0;
1337
1338 if (type & HCI_SMP_LTK)
1339 mgmt_new_ltk(hdev, key, 1);
1340
1341 return 0;
1342 }
1343
1344 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345 {
1346 struct link_key *key;
1347
1348 key = hci_find_link_key(hdev, bdaddr);
1349 if (!key)
1350 return -ENOENT;
1351
1352 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1353
1354 list_del(&key->list);
1355 kfree(key);
1356
1357 return 0;
1358 }
1359
1360 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361 {
1362 struct smp_ltk *k, *tmp;
1363
1364 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1365 if (bacmp(bdaddr, &k->bdaddr))
1366 continue;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&k->list);
1371 kfree(k);
1372 }
1373
1374 return 0;
1375 }
1376
1377 /* HCI command timer function */
1378 static void hci_cmd_timer(unsigned long arg)
1379 {
1380 struct hci_dev *hdev = (void *) arg;
1381
1382 BT_ERR("%s command tx timeout", hdev->name);
1383 atomic_set(&hdev->cmd_cnt, 1);
1384 queue_work(hdev->workqueue, &hdev->cmd_work);
1385 }
1386
1387 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1388 bdaddr_t *bdaddr)
1389 {
1390 struct oob_data *data;
1391
1392 list_for_each_entry(data, &hdev->remote_oob_data, list)
1393 if (bacmp(bdaddr, &data->bdaddr) == 0)
1394 return data;
1395
1396 return NULL;
1397 }
1398
1399 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400 {
1401 struct oob_data *data;
1402
1403 data = hci_find_remote_oob_data(hdev, bdaddr);
1404 if (!data)
1405 return -ENOENT;
1406
1407 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1408
1409 list_del(&data->list);
1410 kfree(data);
1411
1412 return 0;
1413 }
1414
1415 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1416 {
1417 struct oob_data *data, *n;
1418
1419 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1420 list_del(&data->list);
1421 kfree(data);
1422 }
1423
1424 return 0;
1425 }
1426
1427 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1428 u8 *randomizer)
1429 {
1430 struct oob_data *data;
1431
1432 data = hci_find_remote_oob_data(hdev, bdaddr);
1433
1434 if (!data) {
1435 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1436 if (!data)
1437 return -ENOMEM;
1438
1439 bacpy(&data->bdaddr, bdaddr);
1440 list_add(&data->list, &hdev->remote_oob_data);
1441 }
1442
1443 memcpy(data->hash, hash, sizeof(data->hash));
1444 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1445
1446 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1447
1448 return 0;
1449 }
1450
1451 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1452 bdaddr_t *bdaddr)
1453 {
1454 struct bdaddr_list *b;
1455
1456 list_for_each_entry(b, &hdev->blacklist, list)
1457 if (bacmp(bdaddr, &b->bdaddr) == 0)
1458 return b;
1459
1460 return NULL;
1461 }
1462
1463 int hci_blacklist_clear(struct hci_dev *hdev)
1464 {
1465 struct list_head *p, *n;
1466
1467 list_for_each_safe(p, n, &hdev->blacklist) {
1468 struct bdaddr_list *b;
1469
1470 b = list_entry(p, struct bdaddr_list, list);
1471
1472 list_del(p);
1473 kfree(b);
1474 }
1475
1476 return 0;
1477 }
1478
1479 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1480 {
1481 struct bdaddr_list *entry;
1482
1483 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1484 return -EBADF;
1485
1486 if (hci_blacklist_lookup(hdev, bdaddr))
1487 return -EEXIST;
1488
1489 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1490 if (!entry)
1491 return -ENOMEM;
1492
1493 bacpy(&entry->bdaddr, bdaddr);
1494
1495 list_add(&entry->list, &hdev->blacklist);
1496
1497 return mgmt_device_blocked(hdev, bdaddr, type);
1498 }
1499
1500 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1501 {
1502 struct bdaddr_list *entry;
1503
1504 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1505 return hci_blacklist_clear(hdev);
1506
1507 entry = hci_blacklist_lookup(hdev, bdaddr);
1508 if (!entry)
1509 return -ENOENT;
1510
1511 list_del(&entry->list);
1512 kfree(entry);
1513
1514 return mgmt_device_unblocked(hdev, bdaddr, type);
1515 }
1516
1517 static void hci_clear_adv_cache(struct work_struct *work)
1518 {
1519 struct hci_dev *hdev = container_of(work, struct hci_dev,
1520 adv_work.work);
1521
1522 hci_dev_lock(hdev);
1523
1524 hci_adv_entries_clear(hdev);
1525
1526 hci_dev_unlock(hdev);
1527 }
1528
1529 int hci_adv_entries_clear(struct hci_dev *hdev)
1530 {
1531 struct adv_entry *entry, *tmp;
1532
1533 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1534 list_del(&entry->list);
1535 kfree(entry);
1536 }
1537
1538 BT_DBG("%s adv cache cleared", hdev->name);
1539
1540 return 0;
1541 }
1542
1543 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1544 {
1545 struct adv_entry *entry;
1546
1547 list_for_each_entry(entry, &hdev->adv_entries, list)
1548 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1549 return entry;
1550
1551 return NULL;
1552 }
1553
1554 static inline int is_connectable_adv(u8 evt_type)
1555 {
1556 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1557 return 1;
1558
1559 return 0;
1560 }
1561
1562 int hci_add_adv_entry(struct hci_dev *hdev,
1563 struct hci_ev_le_advertising_info *ev)
1564 {
1565 struct adv_entry *entry;
1566
1567 if (!is_connectable_adv(ev->evt_type))
1568 return -EINVAL;
1569
1570 /* Only new entries should be added to adv_entries. So, if
1571 * bdaddr was found, don't add it. */
1572 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1573 return 0;
1574
1575 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1576 if (!entry)
1577 return -ENOMEM;
1578
1579 bacpy(&entry->bdaddr, &ev->bdaddr);
1580 entry->bdaddr_type = ev->bdaddr_type;
1581
1582 list_add(&entry->list, &hdev->adv_entries);
1583
1584 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1585 batostr(&entry->bdaddr), entry->bdaddr_type);
1586
1587 return 0;
1588 }
1589
1590 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1591 {
1592 struct le_scan_params *param = (struct le_scan_params *) opt;
1593 struct hci_cp_le_set_scan_param cp;
1594
1595 memset(&cp, 0, sizeof(cp));
1596 cp.type = param->type;
1597 cp.interval = cpu_to_le16(param->interval);
1598 cp.window = cpu_to_le16(param->window);
1599
1600 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1601 }
1602
1603 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1604 {
1605 struct hci_cp_le_set_scan_enable cp;
1606
1607 memset(&cp, 0, sizeof(cp));
1608 cp.enable = 1;
1609
1610 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1611 }
1612
1613 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1614 u16 window, int timeout)
1615 {
1616 long timeo = msecs_to_jiffies(3000);
1617 struct le_scan_params param;
1618 int err;
1619
1620 BT_DBG("%s", hdev->name);
1621
1622 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1623 return -EINPROGRESS;
1624
1625 param.type = type;
1626 param.interval = interval;
1627 param.window = window;
1628
1629 hci_req_lock(hdev);
1630
1631 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1632 timeo);
1633 if (!err)
1634 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1635
1636 hci_req_unlock(hdev);
1637
1638 if (err < 0)
1639 return err;
1640
1641 schedule_delayed_work(&hdev->le_scan_disable,
1642 msecs_to_jiffies(timeout));
1643
1644 return 0;
1645 }
1646
1647 static void le_scan_disable_work(struct work_struct *work)
1648 {
1649 struct hci_dev *hdev = container_of(work, struct hci_dev,
1650 le_scan_disable.work);
1651 struct hci_cp_le_set_scan_enable cp;
1652
1653 BT_DBG("%s", hdev->name);
1654
1655 memset(&cp, 0, sizeof(cp));
1656
1657 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1658 }
1659
1660 static void le_scan_work(struct work_struct *work)
1661 {
1662 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1663 struct le_scan_params *param = &hdev->le_scan_params;
1664
1665 BT_DBG("%s", hdev->name);
1666
1667 hci_do_le_scan(hdev, param->type, param->interval,
1668 param->window, param->timeout);
1669 }
1670
1671 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1672 int timeout)
1673 {
1674 struct le_scan_params *param = &hdev->le_scan_params;
1675
1676 BT_DBG("%s", hdev->name);
1677
1678 if (work_busy(&hdev->le_scan))
1679 return -EINPROGRESS;
1680
1681 param->type = type;
1682 param->interval = interval;
1683 param->window = window;
1684 param->timeout = timeout;
1685
1686 queue_work(system_long_wq, &hdev->le_scan);
1687
1688 return 0;
1689 }
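/* Usage sketch (parameter values are illustrative): interval and window
 * are in 0.625 ms units and timeout is in milliseconds, so an active
 * scan (type 0x01) with a 60 ms interval and a 30 ms window, stopped
 * after 10 seconds, would be:
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0060, 0x0030, 10000);
 */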
1690
1691 /* Register HCI device */
1692 int hci_register_dev(struct hci_dev *hdev)
1693 {
1694 struct list_head *head = &hci_dev_list, *p;
1695 int i, id, error;
1696
1697 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1698
1699 if (!hdev->open || !hdev->close)
1700 return -EINVAL;
1701
1702 /* Do not allow HCI_AMP devices to register at index 0,
1703 * so the index can be used as the AMP controller ID.
1704 */
1705 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1706
1707 write_lock(&hci_dev_list_lock);
1708
1709 /* Find first available device id */
1710 list_for_each(p, &hci_dev_list) {
1711 if (list_entry(p, struct hci_dev, list)->id != id)
1712 break;
1713 head = p; id++;
1714 }
1715
1716 sprintf(hdev->name, "hci%d", id);
1717 hdev->id = id;
1718 list_add_tail(&hdev->list, head);
1719
1720 mutex_init(&hdev->lock);
1721
1722 hdev->flags = 0;
1723 hdev->dev_flags = 0;
1724 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1725 hdev->esco_type = (ESCO_HV1);
1726 hdev->link_mode = (HCI_LM_ACCEPT);
1727 hdev->io_capability = 0x03; /* No Input No Output */
1728
1729 hdev->idle_timeout = 0;
1730 hdev->sniff_max_interval = 800;
1731 hdev->sniff_min_interval = 80;
1732
1733 INIT_WORK(&hdev->rx_work, hci_rx_work);
1734 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1735 INIT_WORK(&hdev->tx_work, hci_tx_work);
1736
1737
1738 skb_queue_head_init(&hdev->rx_q);
1739 skb_queue_head_init(&hdev->cmd_q);
1740 skb_queue_head_init(&hdev->raw_q);
1741
1742 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1743
1744 for (i = 0; i < NUM_REASSEMBLY; i++)
1745 hdev->reassembly[i] = NULL;
1746
1747 init_waitqueue_head(&hdev->req_wait_q);
1748 mutex_init(&hdev->req_lock);
1749
1750 discovery_init(hdev);
1751
1752 hci_conn_hash_init(hdev);
1753
1754 INIT_LIST_HEAD(&hdev->mgmt_pending);
1755
1756 INIT_LIST_HEAD(&hdev->blacklist);
1757
1758 INIT_LIST_HEAD(&hdev->uuids);
1759
1760 INIT_LIST_HEAD(&hdev->link_keys);
1761 INIT_LIST_HEAD(&hdev->long_term_keys);
1762
1763 INIT_LIST_HEAD(&hdev->remote_oob_data);
1764
1765 INIT_LIST_HEAD(&hdev->adv_entries);
1766
1767 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1768 INIT_WORK(&hdev->power_on, hci_power_on);
1769 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1770
1771 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1772
1773 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1774
1775 atomic_set(&hdev->promisc, 0);
1776
1777 INIT_WORK(&hdev->le_scan, le_scan_work);
1778
1779 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1780
1781 write_unlock(&hci_dev_list_lock);
1782
1783 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1784 WQ_MEM_RECLAIM, 1);
1785 if (!hdev->workqueue) {
1786 error = -ENOMEM;
1787 goto err;
1788 }
1789
1790 error = hci_add_sysfs(hdev);
1791 if (error < 0)
1792 goto err_wqueue;
1793
1794 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1795 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1796 if (hdev->rfkill) {
1797 if (rfkill_register(hdev->rfkill) < 0) {
1798 rfkill_destroy(hdev->rfkill);
1799 hdev->rfkill = NULL;
1800 }
1801 }
1802
1803 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1804 set_bit(HCI_SETUP, &hdev->dev_flags);
1805 schedule_work(&hdev->power_on);
1806
1807 hci_notify(hdev, HCI_DEV_REG);
1808 hci_dev_hold(hdev);
1809
1810 return id;
1811
1812 err_wqueue:
1813 destroy_workqueue(hdev->workqueue);
1814 err:
1815 write_lock(&hci_dev_list_lock);
1816 list_del(&hdev->list);
1817 write_unlock(&hci_dev_list_lock);
1818
1819 return error;
1820 }
1821 EXPORT_SYMBOL(hci_register_dev);
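/* Minimal driver-side sketch (callback names are hypothetical, error
 * handling trimmed): a transport driver fills in its ops and lets
 * hci_register_dev() assign the next free hciX id.
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus      = HCI_USB;
 *	hdev->dev_type = HCI_BREDR;
 *	hdev->open     = my_open;	int my_open(struct hci_dev *hdev)
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;	int my_send(struct sk_buff *skb)
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */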
1822
1823 /* Unregister HCI device */
1824 void hci_unregister_dev(struct hci_dev *hdev)
1825 {
1826 int i;
1827
1828 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1829
1830 write_lock(&hci_dev_list_lock);
1831 list_del(&hdev->list);
1832 write_unlock(&hci_dev_list_lock);
1833
1834 hci_dev_do_close(hdev);
1835
1836 for (i = 0; i < NUM_REASSEMBLY; i++)
1837 kfree_skb(hdev->reassembly[i]);
1838
1839 if (!test_bit(HCI_INIT, &hdev->flags) &&
1840 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1841 hci_dev_lock(hdev);
1842 mgmt_index_removed(hdev);
1843 hci_dev_unlock(hdev);
1844 }
1845
1846 /* mgmt_index_removed should take care of emptying the
1847 * pending list */
1848 BUG_ON(!list_empty(&hdev->mgmt_pending));
1849
1850 hci_notify(hdev, HCI_DEV_UNREG);
1851
1852 if (hdev->rfkill) {
1853 rfkill_unregister(hdev->rfkill);
1854 rfkill_destroy(hdev->rfkill);
1855 }
1856
1857 hci_del_sysfs(hdev);
1858
1859 cancel_delayed_work_sync(&hdev->adv_work);
1860
1861 destroy_workqueue(hdev->workqueue);
1862
1863 hci_dev_lock(hdev);
1864 hci_blacklist_clear(hdev);
1865 hci_uuids_clear(hdev);
1866 hci_link_keys_clear(hdev);
1867 hci_smp_ltks_clear(hdev);
1868 hci_remote_oob_data_clear(hdev);
1869 hci_adv_entries_clear(hdev);
1870 hci_dev_unlock(hdev);
1871
1872 hci_dev_put(hdev);
1873 }
1874 EXPORT_SYMBOL(hci_unregister_dev);
1875
1876 /* Suspend HCI device */
1877 int hci_suspend_dev(struct hci_dev *hdev)
1878 {
1879 hci_notify(hdev, HCI_DEV_SUSPEND);
1880 return 0;
1881 }
1882 EXPORT_SYMBOL(hci_suspend_dev);
1883
1884 /* Resume HCI device */
1885 int hci_resume_dev(struct hci_dev *hdev)
1886 {
1887 hci_notify(hdev, HCI_DEV_RESUME);
1888 return 0;
1889 }
1890 EXPORT_SYMBOL(hci_resume_dev);
1891
1892 /* Receive frame from HCI drivers */
1893 int hci_recv_frame(struct sk_buff *skb)
1894 {
1895 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1896 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1897 && !test_bit(HCI_INIT, &hdev->flags))) {
1898 kfree_skb(skb);
1899 return -ENXIO;
1900 }
1901
1902 /* Incoming skb */
1903 bt_cb(skb)->incoming = 1;
1904
1905 /* Time stamp */
1906 __net_timestamp(skb);
1907
1908 skb_queue_tail(&hdev->rx_q, skb);
1909 queue_work(hdev->workqueue, &hdev->rx_work);
1910
1911 return 0;
1912 }
1913 EXPORT_SYMBOL(hci_recv_frame);
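/* Driver RX sketch (illustrative): a transport that already has a
 * complete HCI packet hands it over like this. hci_recv_frame() takes
 * ownership of the skb in both the success and the error path.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *
 *	return hci_recv_frame(skb);
 */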
1914
1915 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1916 int count, __u8 index)
1917 {
1918 int len = 0;
1919 int hlen = 0;
1920 int remain = count;
1921 struct sk_buff *skb;
1922 struct bt_skb_cb *scb;
1923
1924 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1925 index >= NUM_REASSEMBLY)
1926 return -EILSEQ;
1927
1928 skb = hdev->reassembly[index];
1929
1930 if (!skb) {
1931 switch (type) {
1932 case HCI_ACLDATA_PKT:
1933 len = HCI_MAX_FRAME_SIZE;
1934 hlen = HCI_ACL_HDR_SIZE;
1935 break;
1936 case HCI_EVENT_PKT:
1937 len = HCI_MAX_EVENT_SIZE;
1938 hlen = HCI_EVENT_HDR_SIZE;
1939 break;
1940 case HCI_SCODATA_PKT:
1941 len = HCI_MAX_SCO_SIZE;
1942 hlen = HCI_SCO_HDR_SIZE;
1943 break;
1944 }
1945
1946 skb = bt_skb_alloc(len, GFP_ATOMIC);
1947 if (!skb)
1948 return -ENOMEM;
1949
1950 scb = (void *) skb->cb;
1951 scb->expect = hlen;
1952 scb->pkt_type = type;
1953
1954 skb->dev = (void *) hdev;
1955 hdev->reassembly[index] = skb;
1956 }
1957
1958 while (count) {
1959 scb = (void *) skb->cb;
1960 len = min(scb->expect, (__u16)count);
1961
1962 memcpy(skb_put(skb, len), data, len);
1963
1964 count -= len;
1965 data += len;
1966 scb->expect -= len;
1967 remain = count;
1968
1969 switch (type) {
1970 case HCI_EVENT_PKT:
1971 if (skb->len == HCI_EVENT_HDR_SIZE) {
1972 struct hci_event_hdr *h = hci_event_hdr(skb);
1973 scb->expect = h->plen;
1974
1975 if (skb_tailroom(skb) < scb->expect) {
1976 kfree_skb(skb);
1977 hdev->reassembly[index] = NULL;
1978 return -ENOMEM;
1979 }
1980 }
1981 break;
1982
1983 case HCI_ACLDATA_PKT:
1984 if (skb->len == HCI_ACL_HDR_SIZE) {
1985 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1986 scb->expect = __le16_to_cpu(h->dlen);
1987
1988 if (skb_tailroom(skb) < scb->expect) {
1989 kfree_skb(skb);
1990 hdev->reassembly[index] = NULL;
1991 return -ENOMEM;
1992 }
1993 }
1994 break;
1995
1996 case HCI_SCODATA_PKT:
1997 if (skb->len == HCI_SCO_HDR_SIZE) {
1998 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1999 scb->expect = h->dlen;
2000
2001 if (skb_tailroom(skb) < scb->expect) {
2002 kfree_skb(skb);
2003 hdev->reassembly[index] = NULL;
2004 return -ENOMEM;
2005 }
2006 }
2007 break;
2008 }
2009
2010 if (scb->expect == 0) {
2011 /* Complete frame */
2012
2013 bt_cb(skb)->pkt_type = type;
2014 hci_recv_frame(skb);
2015
2016 hdev->reassembly[index] = NULL;
2017 return remain;
2018 }
2019 }
2020
2021 return remain;
2022 }
2023
2024 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2025 {
2026 int rem = 0;
2027
2028 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2029 return -EILSEQ;
2030
2031 while (count) {
2032 rem = hci_reassembly(hdev, type, data, count, type - 1);
2033 if (rem < 0)
2034 return rem;
2035
2036 data += (count - rem);
2037 count = rem;
2038 }
2039
2040 return rem;
2041 }
2042 EXPORT_SYMBOL(hci_recv_fragment);
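/* Sketch for byte-stream transports such as UARTs (illustrative): a
 * driver feeds whatever bytes arrived for a known packet type, and the
 * reassembly code accumulates them, delivering each complete frame
 * through hci_recv_frame():
 *
 *	rem = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, count);
 *	if (rem < 0)
 *		the input was malformed; drop or reset the transport
 *
 * For streams where each packet is prefixed by its type byte, use
 * hci_recv_stream_fragment() below instead.
 */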
2043
2044 #define STREAM_REASSEMBLY 0
2045
2046 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2047 {
2048 int type;
2049 int rem = 0;
2050
2051 while (count) {
2052 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2053
2054 if (!skb) {
2055 struct { char type; } *pkt;
2056
2057 /* Start of the frame */
2058 pkt = data;
2059 type = pkt->type;
2060
2061 data++;
2062 count--;
2063 } else
2064 type = bt_cb(skb)->pkt_type;
2065
2066 rem = hci_reassembly(hdev, type, data, count,
2067 STREAM_REASSEMBLY);
2068 if (rem < 0)
2069 return rem;
2070
2071 data += (count - rem);
2072 count = rem;
2073 }
2074
2075 return rem;
2076 }
2077 EXPORT_SYMBOL(hci_recv_stream_fragment);
2078
2079 /* ---- Interface to upper protocols ---- */
2080
2081 int hci_register_cb(struct hci_cb *cb)
2082 {
2083 BT_DBG("%p name %s", cb, cb->name);
2084
2085 write_lock(&hci_cb_list_lock);
2086 list_add(&cb->list, &hci_cb_list);
2087 write_unlock(&hci_cb_list_lock);
2088
2089 return 0;
2090 }
2091 EXPORT_SYMBOL(hci_register_cb);
2092
2093 int hci_unregister_cb(struct hci_cb *cb)
2094 {
2095 BT_DBG("%p name %s", cb, cb->name);
2096
2097 write_lock(&hci_cb_list_lock);
2098 list_del(&cb->list);
2099 write_unlock(&hci_cb_list_lock);
2100
2101 return 0;
2102 }
2103 EXPORT_SYMBOL(hci_unregister_cb);
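/* Registration sketch (hypothetical protocol; only the fields it cares
 * about are set): upper layers such as L2CAP hook connection events
 * through struct hci_cb.
 *
 *	static void my_security_cfm(struct hci_conn *conn, __u8 status,
 *				    __u8 encrypt)
 *	{
 *	}
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */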
2104
2105 static int hci_send_frame(struct sk_buff *skb)
2106 {
2107 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2108
2109 if (!hdev) {
2110 kfree_skb(skb);
2111 return -ENODEV;
2112 }
2113
2114 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2115
2116 if (atomic_read(&hdev->promisc)) {
2117 /* Time stamp */
2118 __net_timestamp(skb);
2119
2120 hci_send_to_sock(hdev, skb);
2121 }
2122
2123 /* Get rid of skb owner, prior to sending to the driver. */
2124 skb_orphan(skb);
2125
2126 return hdev->send(skb);
2127 }
2128
2129 /* Send HCI command */
2130 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2131 {
2132 int len = HCI_COMMAND_HDR_SIZE + plen;
2133 struct hci_command_hdr *hdr;
2134 struct sk_buff *skb;
2135
2136 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2137
2138 skb = bt_skb_alloc(len, GFP_ATOMIC);
2139 if (!skb) {
2140 BT_ERR("%s no memory for command", hdev->name);
2141 return -ENOMEM;
2142 }
2143
2144 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2145 hdr->opcode = cpu_to_le16(opcode);
2146 hdr->plen = plen;
2147
2148 if (plen)
2149 memcpy(skb_put(skb, plen), param, plen);
2150
2151 BT_DBG("skb len %d", skb->len);
2152
2153 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2154 skb->dev = (void *) hdev;
2155
2156 if (test_bit(HCI_INIT, &hdev->flags))
2157 hdev->init_last_cmd = opcode;
2158
2159 skb_queue_tail(&hdev->cmd_q, skb);
2160 queue_work(hdev->workqueue, &hdev->cmd_work);
2161
2162 return 0;
2163 }
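/* Usage sketch: the parameter buffer is copied into the skb, so a stack
 * variable is fine. Enabling both page and inquiry scan, for example:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */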
2164
2165 /* Get data from the previously sent command */
2166 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2167 {
2168 struct hci_command_hdr *hdr;
2169
2170 if (!hdev->sent_cmd)
2171 return NULL;
2172
2173 hdr = (void *) hdev->sent_cmd->data;
2174
2175 if (hdr->opcode != cpu_to_le16(opcode))
2176 return NULL;
2177
2178 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2179
2180 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2181 }
2182
2183 /* Send ACL data */
2184 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2185 {
2186 struct hci_acl_hdr *hdr;
2187 int len = skb->len;
2188
2189 skb_push(skb, HCI_ACL_HDR_SIZE);
2190 skb_reset_transport_header(skb);
2191 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2192 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2193 hdr->dlen = cpu_to_le16(len);
2194 }
2195
2196 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2197 struct sk_buff *skb, __u16 flags)
2198 {
2199 struct hci_dev *hdev = conn->hdev;
2200 struct sk_buff *list;
2201
2202 list = skb_shinfo(skb)->frag_list;
2203 if (!list) {
2204 /* Non fragmented */
2205 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2206
2207 skb_queue_tail(queue, skb);
2208 } else {
2209 /* Fragmented */
2210 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2211
2212 skb_shinfo(skb)->frag_list = NULL;
2213
2214 /* Queue all fragments atomically */
2215 spin_lock(&queue->lock);
2216
2217 __skb_queue_tail(queue, skb);
2218
2219 flags &= ~ACL_START;
2220 flags |= ACL_CONT;
2221 do {
2222 skb = list; list = list->next;
2223
2224 skb->dev = (void *) hdev;
2225 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2226 hci_add_acl_hdr(skb, conn->handle, flags);
2227
2228 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2229
2230 __skb_queue_tail(queue, skb);
2231 } while (list);
2232
2233 spin_unlock(&queue->lock);
2234 }
2235 }
2236
2237 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2238 {
2239 struct hci_conn *conn = chan->conn;
2240 struct hci_dev *hdev = conn->hdev;
2241
2242 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2243
2244 skb->dev = (void *) hdev;
2245 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2246 hci_add_acl_hdr(skb, conn->handle, flags);
2247
2248 hci_queue_acl(conn, &chan->data_q, skb, flags);
2249
2250 queue_work(hdev->workqueue, &hdev->tx_work);
2251 }
2252 EXPORT_SYMBOL(hci_send_acl);
2253
2254 /* Send SCO data */
2255 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2256 {
2257 struct hci_dev *hdev = conn->hdev;
2258 struct hci_sco_hdr hdr;
2259
2260 BT_DBG("%s len %d", hdev->name, skb->len);
2261
2262 hdr.handle = cpu_to_le16(conn->handle);
2263 hdr.dlen = skb->len;
2264
2265 skb_push(skb, HCI_SCO_HDR_SIZE);
2266 skb_reset_transport_header(skb);
2267 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2268
2269 skb->dev = (void *) hdev;
2270 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2271
2272 skb_queue_tail(&conn->data_q, skb);
2273 queue_work(hdev->workqueue, &hdev->tx_work);
2274 }
2275 EXPORT_SYMBOL(hci_send_sco);
2276
2277 /* ---- HCI TX task (outgoing data) ---- */
2278
2279 /* HCI Connection scheduler */
2280 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2281 {
2282 struct hci_conn_hash *h = &hdev->conn_hash;
2283 struct hci_conn *conn = NULL, *c;
2284 int num = 0, min = ~0;
2285
2286 /* We don't have to lock device here. Connections are always
2287 * added and removed with TX task disabled. */
2288
2289 rcu_read_lock();
2290
2291 list_for_each_entry_rcu(c, &h->list, list) {
2292 if (c->type != type || skb_queue_empty(&c->data_q))
2293 continue;
2294
2295 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2296 continue;
2297
2298 num++;
2299
2300 if (c->sent < min) {
2301 min = c->sent;
2302 conn = c;
2303 }
2304
2305 if (hci_conn_num(hdev, type) == num)
2306 break;
2307 }
2308
2309 rcu_read_unlock();
2310
2311 if (conn) {
2312 int cnt, q;
2313
2314 switch (conn->type) {
2315 case ACL_LINK:
2316 cnt = hdev->acl_cnt;
2317 break;
2318 case SCO_LINK:
2319 case ESCO_LINK:
2320 cnt = hdev->sco_cnt;
2321 break;
2322 case LE_LINK:
2323 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2324 break;
2325 default:
2326 cnt = 0;
2327 BT_ERR("Unknown link type");
2328 }
2329
2330 q = cnt / num;
2331 *quote = q ? q : 1;
2332 } else
2333 *quote = 0;
2334
2335 BT_DBG("conn %p quote %d", conn, *quote);
2336 return conn;
2337 }
2338
2339 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2340 {
2341 struct hci_conn_hash *h = &hdev->conn_hash;
2342 struct hci_conn *c;
2343
2344 BT_ERR("%s link tx timeout", hdev->name);
2345
2346 rcu_read_lock();
2347
2348 /* Kill stalled connections */
2349 list_for_each_entry_rcu(c, &h->list, list) {
2350 if (c->type == type && c->sent) {
2351 BT_ERR("%s killing stalled connection %s",
2352 hdev->name, batostr(&c->dst));
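/* 0x13: Remote User Terminated Connection */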
2353 hci_acl_disconn(c, 0x13);
2354 }
2355 }
2356
2357 rcu_read_unlock();
2358 }
2359
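/* Channel scheduler: like hci_low_sent(), but operates on hci_chan
* queues. Only channels whose head skb carries the highest pending
* priority are eligible; among those, the connection with the fewest
* outstanding packets wins. */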
2360 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2361 int *quote)
2362 {
2363 struct hci_conn_hash *h = &hdev->conn_hash;
2364 struct hci_chan *chan = NULL;
2365 int num = 0, min = ~0, cur_prio = 0;
2366 struct hci_conn *conn;
2367 int cnt, q, conn_num = 0;
2368
2369 BT_DBG("%s", hdev->name);
2370
2371 rcu_read_lock();
2372
2373 list_for_each_entry_rcu(conn, &h->list, list) {
2374 struct hci_chan *tmp;
2375
2376 if (conn->type != type)
2377 continue;
2378
2379 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2380 continue;
2381
2382 conn_num++;
2383
2384 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2385 struct sk_buff *skb;
2386
2387 if (skb_queue_empty(&tmp->data_q))
2388 continue;
2389
2390 skb = skb_peek(&tmp->data_q);
2391 if (skb->priority < cur_prio)
2392 continue;
2393
2394 if (skb->priority > cur_prio) {
2395 num = 0;
2396 min = ~0;
2397 cur_prio = skb->priority;
2398 }
2399
2400 num++;
2401
2402 if (conn->sent < min) {
2403 min = conn->sent;
2404 chan = tmp;
2405 }
2406 }
2407
2408 if (hci_conn_num(hdev, type) == conn_num)
2409 break;
2410 }
2411
2412 rcu_read_unlock();
2413
2414 if (!chan)
2415 return NULL;
2416
2417 switch (chan->conn->type) {
2418 case ACL_LINK:
2419 cnt = hdev->acl_cnt;
2420 break;
2421 case SCO_LINK:
2422 case ESCO_LINK:
2423 cnt = hdev->sco_cnt;
2424 break;
2425 case LE_LINK:
2426 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2427 break;
2428 default:
2429 cnt = 0;
2430 BT_ERR("Unknown link type");
2431 }
2432
2433 q = cnt / num;
2434 *quote = q ? q : 1;
2435 BT_DBG("chan %p quote %d", chan, *quote);
2436 return chan;
2437 }
2438
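/* Starvation avoidance: reset the sent counter of every channel that
* was served in the last round, and promote the head skb of channels
* that were not to HCI_PRIO_MAX - 1 so they win the next comparison. */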
2439 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2440 {
2441 struct hci_conn_hash *h = &hdev->conn_hash;
2442 struct hci_conn *conn;
2443 int num = 0;
2444
2445 BT_DBG("%s", hdev->name);
2446
2447 rcu_read_lock();
2448
2449 list_for_each_entry_rcu(conn, &h->list, list) {
2450 struct hci_chan *chan;
2451
2452 if (conn->type != type)
2453 continue;
2454
2455 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2456 continue;
2457
2458 num++;
2459
2460 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2461 struct sk_buff *skb;
2462
2463 if (chan->sent) {
2464 chan->sent = 0;
2465 continue;
2466 }
2467
2468 if (skb_queue_empty(&chan->data_q))
2469 continue;
2470
2471 skb = skb_peek(&chan->data_q);
2472 if (skb->priority >= HCI_PRIO_MAX - 1)
2473 continue;
2474
2475 skb->priority = HCI_PRIO_MAX - 1;
2476
2477 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2478 skb->priority);
2479 }
2480
2481 if (hci_conn_num(hdev, type) == num)
2482 break;
2483 }
2484
2485 rcu_read_unlock();
2487 }
2488
2489 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2490 {
2491 /* Calculate count of blocks used by this packet */
2492 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2493 }
2494
2495 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2496 {
2497 if (!test_bit(HCI_RAW, &hdev->flags)) {
2498 /* ACL tx timeout must be longer than maximum
2499 * link supervision timeout (40.9 seconds) */
2500 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2501 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2502 hci_link_tx_to(hdev, ACL_LINK);
2503 }
2504 }
2505
2506 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2507 {
2508 unsigned int cnt = hdev->acl_cnt;
2509 struct hci_chan *chan;
2510 struct sk_buff *skb;
2511 int quote;
2512
2513 __check_timeout(hdev, cnt);
2514
2515 while (hdev->acl_cnt &&
2516 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2517 u32 priority = (skb_peek(&chan->data_q))->priority;
2518 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2519 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2520 skb->len, skb->priority);
2521
2522 /* Stop if priority has changed */
2523 if (skb->priority < priority)
2524 break;
2525
2526 skb = skb_dequeue(&chan->data_q);
2527
2528 hci_conn_enter_active_mode(chan->conn,
2529 bt_cb(skb)->force_active);
2530
2531 hci_send_frame(skb);
2532 hdev->acl_last_tx = jiffies;
2533
2534 hdev->acl_cnt--;
2535 chan->sent++;
2536 chan->conn->sent++;
2537 }
2538 }
2539
2540 if (cnt != hdev->acl_cnt)
2541 hci_prio_recalculate(hdev, ACL_LINK);
2542 }
2543
2544 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2545 {
2546 unsigned int cnt = hdev->block_cnt;
2547 struct hci_chan *chan;
2548 struct sk_buff *skb;
2549 int quote;
2550
2551 __check_timeout(hdev, cnt);
2552
2553 while (hdev->block_cnt > 0 &&
2554 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2555 u32 priority = (skb_peek(&chan->data_q))->priority;
2556 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2557 int blocks;
2558
2559 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2560 skb->len, skb->priority);
2561
2562 /* Stop if priority has changed */
2563 if (skb->priority < priority)
2564 break;
2565
/* Check the block budget against the still-queued skb first, so a
* packet that does not fit is retried later instead of being lost. */
2566 blocks = __get_blocks(hdev, skb);
2567 if (blocks > hdev->block_cnt)
2568 return;
2569 
2570 skb = skb_dequeue(&chan->data_q);
2571
2572 hci_conn_enter_active_mode(chan->conn,
2573 bt_cb(skb)->force_active);
2574
2575 hci_send_frame(skb);
2576 hdev->acl_last_tx = jiffies;
2577
2578 hdev->block_cnt -= blocks;
2579 quote -= blocks;
2580
2581 chan->sent += blocks;
2582 chan->conn->sent += blocks;
2583 }
2584 }
2585
2586 if (cnt != hdev->block_cnt)
2587 hci_prio_recalculate(hdev, ACL_LINK);
2588 }
2589
2590 static inline void hci_sched_acl(struct hci_dev *hdev)
2591 {
2592 BT_DBG("%s", hdev->name);
2593
2594 if (!hci_conn_num(hdev, ACL_LINK))
2595 return;
2596
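/* Controllers advertise either packet-based or block-based flow
* control; dispatch to the matching ACL scheduler. */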
2597 switch (hdev->flow_ctl_mode) {
2598 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2599 hci_sched_acl_pkt(hdev);
2600 break;
2601
2602 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2603 hci_sched_acl_blk(hdev);
2604 break;
2605 }
2606 }
2607
2608 /* Schedule SCO */
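/* SCO has no channel layer and no priorities: frames leave in FIFO
* order, one connection quota at a time, and the sent counter simply
* wraps back to zero. */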
2609 static inline void hci_sched_sco(struct hci_dev *hdev)
2610 {
2611 struct hci_conn *conn;
2612 struct sk_buff *skb;
2613 int quote;
2614
2615 BT_DBG("%s", hdev->name);
2616
2617 if (!hci_conn_num(hdev, SCO_LINK))
2618 return;
2619
2620 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2621 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2622 BT_DBG("skb %p len %d", skb, skb->len);
2623 hci_send_frame(skb);
2624
2625 conn->sent++;
2626 if (conn->sent == ~0)
2627 conn->sent = 0;
2628 }
2629 }
2630 }
2631
2632 static inline void hci_sched_esco(struct hci_dev *hdev)
2633 {
2634 struct hci_conn *conn;
2635 struct sk_buff *skb;
2636 int quote;
2637
2638 BT_DBG("%s", hdev->name);
2639
2640 if (!hci_conn_num(hdev, ESCO_LINK))
2641 return;
2642
2643 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2644 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2645 BT_DBG("skb %p len %d", skb, skb->len);
2646 hci_send_frame(skb);
2647
2648 conn->sent++;
2649 if (conn->sent == ~0)
2650 conn->sent = 0;
2651 }
2652 }
2653 }
2654
2655 static inline void hci_sched_le(struct hci_dev *hdev)
2656 {
2657 struct hci_chan *chan;
2658 struct sk_buff *skb;
2659 int quote, cnt, tmp;
2660
2661 BT_DBG("%s", hdev->name);
2662
2663 if (!hci_conn_num(hdev, LE_LINK))
2664 return;
2665
2666 if (!test_bit(HCI_RAW, &hdev->flags)) {
2667 /* LE tx timeout must be longer than maximum
2668 * link supervision timeout (40.9 seconds) */
2669 if (!hdev->le_cnt && hdev->le_pkts &&
2670 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2671 hci_link_tx_to(hdev, LE_LINK);
2672 }
2673
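/* Controllers without a dedicated LE buffer pool (le_pkts == 0) share
* the ACL buffers, so account against acl_cnt instead. */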
2674 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2675 tmp = cnt;
2676 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2677 u32 priority = (skb_peek(&chan->data_q))->priority;
2678 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2679 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2680 skb->len, skb->priority);
2681
2682 /* Stop if priority has changed */
2683 if (skb->priority < priority)
2684 break;
2685
2686 skb = skb_dequeue(&chan->data_q);
2687
2688 hci_send_frame(skb);
2689 hdev->le_last_tx = jiffies;
2690
2691 cnt--;
2692 chan->sent++;
2693 chan->conn->sent++;
2694 }
2695 }
2696
2697 if (hdev->le_pkts)
2698 hdev->le_cnt = cnt;
2699 else
2700 hdev->acl_cnt = cnt;
2701
2702 if (cnt != tmp)
2703 hci_prio_recalculate(hdev, LE_LINK);
2704 }
2705
2706 static void hci_tx_work(struct work_struct *work)
2707 {
2708 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2709 struct sk_buff *skb;
2710
2711 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2712 hdev->sco_cnt, hdev->le_cnt);
2713
2714 /* Schedule queues and send stuff to HCI driver */
2715
2716 hci_sched_acl(hdev);
2717
2718 hci_sched_sco(hdev);
2719
2720 hci_sched_esco(hdev);
2721
2722 hci_sched_le(hdev);
2723
2724 /* Send next queued raw (unknown type) packet */
2725 while ((skb = skb_dequeue(&hdev->raw_q)))
2726 hci_send_frame(skb);
2727 }
2728
2729 /* ----- HCI RX task (incoming data processing) ----- */
2730
2731 /* ACL data packet */
2732 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2733 {
2734 struct hci_acl_hdr *hdr = (void *) skb->data;
2735 struct hci_conn *conn;
2736 __u16 handle, flags;
2737
2738 skb_pull(skb, HCI_ACL_HDR_SIZE);
2739
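/* The 16-bit handle field also packs the packet boundary and broadcast
* flags; split them out before the connection lookup. */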
2740 handle = __le16_to_cpu(hdr->handle);
2741 flags = hci_flags(handle);
2742 handle = hci_handle(handle);
2743
2744 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2745
2746 hdev->stat.acl_rx++;
2747
2748 hci_dev_lock(hdev);
2749 conn = hci_conn_hash_lookup_handle(hdev, handle);
2750 hci_dev_unlock(hdev);
2751
2752 if (conn) {
2753 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2754 
2755 /* Send to upper protocol */
2756 l2cap_recv_acldata(conn, skb, flags);
2757 return;
2758 }
2759 
2760 BT_ERR("%s ACL packet for unknown connection handle %d",
2761 hdev->name, handle);
2762 
2763 kfree_skb(skb);
2764 }
2765
2766 /* SCO data packet */
2767 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768 {
2769 struct hci_sco_hdr *hdr = (void *) skb->data;
2770 struct hci_conn *conn;
2771 __u16 handle;
2772
2773 skb_pull(skb, HCI_SCO_HDR_SIZE);
2774
2775 handle = __le16_to_cpu(hdr->handle);
2776
2777 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2778
2779 hdev->stat.sco_rx++;
2780
2781 hci_dev_lock(hdev);
2782 conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 hci_dev_unlock(hdev);
2784
2785 if (conn) {
2786 /* Send to upper protocol */
2787 sco_recv_scodata(conn, skb);
2788 return;
2789 }
2790 
2791 BT_ERR("%s SCO packet for unknown connection handle %d",
2792 hdev->name, handle);
2793 
2794 kfree_skb(skb);
2795 }
2796
2797 static void hci_rx_work(struct work_struct *work)
2798 {
2799 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2800 struct sk_buff *skb;
2801
2802 BT_DBG("%s", hdev->name);
2803
2804 while ((skb = skb_dequeue(&hdev->rx_q))) {
2805 if (atomic_read(&hdev->promisc)) {
2806 /* Send copy to the sockets */
2807 hci_send_to_sock(hdev, skb);
2808 }
2809
2810 if (test_bit(HCI_RAW, &hdev->flags)) {
2811 kfree_skb(skb);
2812 continue;
2813 }
2814
2815 if (test_bit(HCI_INIT, &hdev->flags)) {
2816 /* Don't process data packets in these states. */
2817 switch (bt_cb(skb)->pkt_type) {
2818 case HCI_ACLDATA_PKT:
2819 case HCI_SCODATA_PKT:
2820 kfree_skb(skb);
2821 continue;
2822 }
2823 }
2824
2825 /* Process frame */
2826 switch (bt_cb(skb)->pkt_type) {
2827 case HCI_EVENT_PKT:
2828 BT_DBG("%s Event packet", hdev->name);
2829 hci_event_packet(hdev, skb);
2830 break;
2831
2832 case HCI_ACLDATA_PKT:
2833 BT_DBG("%s ACL data packet", hdev->name);
2834 hci_acldata_packet(hdev, skb);
2835 break;
2836
2837 case HCI_SCODATA_PKT:
2838 BT_DBG("%s SCO data packet", hdev->name);
2839 hci_scodata_packet(hdev, skb);
2840 break;
2841
2842 default:
2843 kfree_skb(skb);
2844 break;
2845 }
2846 }
2847 }
2848
2849 static void hci_cmd_work(struct work_struct *work)
2850 {
2851 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2852 struct sk_buff *skb;
2853
2854 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2855
2856 /* Send queued commands */
2857 if (atomic_read(&hdev->cmd_cnt)) {
2858 skb = skb_dequeue(&hdev->cmd_q);
2859 if (!skb)
2860 return;
2861
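/* Keep a clone of the outgoing command so hci_sent_cmd_data() can
* inspect it when the matching event arrives; drop the previously
* saved command first. */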
2862 kfree_skb(hdev->sent_cmd);
2863
2864 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2865 if (hdev->sent_cmd) {
2866 atomic_dec(&hdev->cmd_cnt);
2867 hci_send_frame(skb);
2868 if (test_bit(HCI_RESET, &hdev->flags))
2869 del_timer(&hdev->cmd_timer);
2870 else
2871 mod_timer(&hdev->cmd_timer,
2872 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2873 } else {
2874 skb_queue_head(&hdev->cmd_q, skb);
2875 queue_work(hdev->workqueue, &hdev->cmd_work);
2876 }
2877 }
2878 }
2879
2880 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2881 {
2882 /* General inquiry access code (GIAC) */
2883 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2884 struct hci_cp_inquiry cp;
2885
2886 BT_DBG("%s", hdev->name);
2887
2888 if (test_bit(HCI_INQUIRY, &hdev->flags))
2889 return -EINPROGRESS;
2890
2891 inquiry_cache_flush(hdev);
2892
2893 memset(&cp, 0, sizeof(cp));
2894 memcpy(&cp.lap, lap, sizeof(cp.lap));
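/* Inquiry_Length is expressed in units of 1.28 seconds */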
2895 cp.length = length;
2896
2897 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2898 }
2899
2900 int hci_cancel_inquiry(struct hci_dev *hdev)
2901 {
2902 BT_DBG("%s", hdev->name);
2903
2904 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2905 return -EPERM;
2906
2907 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2908 }
2909
2910 module_param(enable_hs, bool, 0644);
2911 MODULE_PARM_DESC(enable_hs, "Enable High Speed");