/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
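/*
 * Usage sketch (added for illustration, not part of the upstream file):
 * a synchronous HCI transaction is built by handing one of the *_req
 * helpers below to hci_request(), e.g. enabling page and inquiry scan:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The request callback fires the HCI command; hci_req_complete() is
 * later invoked from the event-processing path and wakes the sleeping
 * caller through req_wait_q.
 */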
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
249 /* Read Local Version */
250 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
253 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
257 BT_DBG("%s %ld", hdev
->name
, opt
);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
263 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
264 skb
->dev
= (void *) hdev
;
266 skb_queue_tail(&hdev
->cmd_q
, skb
);
267 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
269 skb_queue_purge(&hdev
->driver_init
);
271 switch (hdev
->dev_type
) {
281 BT_ERR("Unknown device type %d", hdev
->dev_type
);
287 static void hci_le_init_req(struct hci_dev
*hdev
, unsigned long opt
)
289 BT_DBG("%s", hdev
->name
);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &hdev->inq_cache.list, list) {
		list_del(&p->list);
		kfree(p);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->list, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		list_add(&ie->list, &cache->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->list, list) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
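/*
 * Illustrative note (not in the original source): this helper backs the
 * HCIINQUIRY ioctl on HCI sockets. A hypothetical user-space caller
 * would fill a struct hci_inquiry_req, leave room for the inquiry_info
 * results right behind it, and issue something like:
 *
 *	struct hci_inquiry_req *ir = buf;
 *	ir->dev_id  = 0;
 *	memcpy(ir->lap, giac, 3);	   // giac = { 0x33, 0x8b, 0x9e }
 *	ir->length  = 8;		   // inquiry window, x 1.28 s
 *	ir->num_rsp = 0;		   // 0 means "up to 255 responses"
 *	ioctl(hci_sock_fd, HCIINQUIRY, ir);
 */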
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
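/*
 * Worked example (illustrative, not from the upstream file): for
 * HCISETACLMTU the 32-bit dev_opt is read back as two __u16 halves. On
 * a little-endian machine, dev_opt = 0x03C00008 yields
 * acl_mtu = 0x03C0 (960) from the high half and acl_pkts = 0x0008 (8)
 * from the low half, matching the two pointer loads above.
 */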
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}
static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
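/*
 * Driver-side sketch (illustrative, not from the upstream file): a bus
 * driver would typically allocate and register a controller from its
 * probe routine, e.g.
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus      = HCI_USB;	// transport type, driver-specific
 *	hdev->open     = my_open;	// my_* are hypothetical driver
 *	hdev->close    = my_close;	// callbacks supplied by the bus
 *	hdev->send     = my_send;	// driver itself
 *	hdev->destruct = my_destruct;
 *	err = hci_register_dev(hdev);
 *
 * and undo it with hci_unregister_dev() followed by hci_free_dev() on
 * remove. open, close and destruct are mandatory, as checked above.
 */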
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
						index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
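/*
 * Worked example (illustrative): reassembling an HCI event that arrives
 * in two chunks. Chunk 1 carries the 2-byte event header, so expect
 * drops to 0 and is immediately reloaded with h->plen; chunk 2 delivers
 * the plen payload bytes, expect reaches 0 again, and the completed skb
 * is handed to hci_recv_frame(). The return value is the number of
 * input bytes that were not consumed for this packet.
 */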
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
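/*
 * Usage sketch (illustrative): a UART or USB transport would push
 * received chunks into the core from its RX path, e.g.
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * using one reassembly slot per packet type (type - 1 above), while
 * hci_recv_stream_fragment() below serves byte streams that carry the
 * packet-type indicator in-band as the first byte of each frame.
 */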
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
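/*
 * Usage sketch (illustrative): event handlers use this to recover the
 * parameters of the command a Command Complete event refers to, e.g.
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (sent)
 *		param = *((__u8 *) sent);
 *
 * NULL is returned when no command is outstanding or the opcode does
 * not match the last sent command.
 */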
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
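/*
 * Example (illustrative): with hdev->acl_cnt = 8 free controller
 * buffers and two ACL connections holding queued data, the least-busy
 * connection (smallest c->sent) is picked and *quote becomes
 * 8 / 2 = 4, i.e. it may send up to four packets in this scheduling
 * round, and never fewer than one.
 */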
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");