2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
56 #define AUTO_OFF_TIMEOUT 2000
60 static void hci_rx_work(struct work_struct
*work
);
61 static void hci_cmd_work(struct work_struct
*work
);
62 static void hci_tx_work(struct work_struct
*work
);
65 LIST_HEAD(hci_dev_list
);
66 DEFINE_RWLOCK(hci_dev_list_lock
);
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list
);
70 DEFINE_RWLOCK(hci_cb_list_lock
);
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier
);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block
*nb
)
79 return atomic_notifier_chain_register(&hci_notifier
, nb
);
82 int hci_unregister_notifier(struct notifier_block
*nb
)
84 return atomic_notifier_chain_unregister(&hci_notifier
, nb
);
87 static void hci_notify(struct hci_dev
*hdev
, int event
)
89 atomic_notifier_call_chain(&hci_notifier
, event
, hdev
);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev
->name
, cmd
, result
);
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
101 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
)
104 if (hdev
->req_status
== HCI_REQ_PEND
) {
105 hdev
->req_result
= result
;
106 hdev
->req_status
= HCI_REQ_DONE
;
107 wake_up_interruptible(&hdev
->req_wait_q
);
111 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
113 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
115 if (hdev
->req_status
== HCI_REQ_PEND
) {
116 hdev
->req_result
= err
;
117 hdev
->req_status
= HCI_REQ_CANCELED
;
118 wake_up_interruptible(&hdev
->req_wait_q
);
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
124 unsigned long opt
, __u32 timeout
)
126 DECLARE_WAITQUEUE(wait
, current
);
129 BT_DBG("%s start", hdev
->name
);
131 hdev
->req_status
= HCI_REQ_PEND
;
133 add_wait_queue(&hdev
->req_wait_q
, &wait
);
134 set_current_state(TASK_INTERRUPTIBLE
);
137 schedule_timeout(timeout
);
139 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
141 if (signal_pending(current
))
144 switch (hdev
->req_status
) {
146 err
= -bt_to_errno(hdev
->req_result
);
149 case HCI_REQ_CANCELED
:
150 err
= -hdev
->req_result
;
158 hdev
->req_status
= hdev
->req_result
= 0;
160 BT_DBG("%s end: err %d", hdev
->name
, err
);
165 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
166 unsigned long opt
, __u32 timeout
)
170 if (!test_bit(HCI_UP
, &hdev
->flags
))
173 /* Serialize all requests */
175 ret
= __hci_request(hdev
, req
, opt
, timeout
);
176 hci_req_unlock(hdev
);
181 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
183 BT_DBG("%s %ld", hdev
->name
, opt
);
186 set_bit(HCI_RESET
, &hdev
->flags
);
187 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
190 static void bredr_init(struct hci_dev
*hdev
)
192 struct hci_cp_delete_stored_link_key cp
;
196 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
198 /* Mandatory initialization */
201 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
)) {
202 set_bit(HCI_RESET
, &hdev
->flags
);
203 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
209 /* Read Local Version */
210 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
215 /* Read BD Address */
216 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
218 /* Read Class of Device */
219 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
221 /* Read Local Name */
222 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
224 /* Read Voice Setting */
225 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
227 /* Optional initialization */
229 /* Clear Event Filters */
230 flt_type
= HCI_FLT_CLEAR_ALL
;
231 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
233 /* Connection accept timeout ~20 secs */
234 param
= cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
237 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
239 hci_send_cmd(hdev
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
242 static void amp_init(struct hci_dev
*hdev
)
244 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
247 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
249 /* Read Local Version */
250 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
253 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
257 BT_DBG("%s %ld", hdev
->name
, opt
);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
263 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
264 skb
->dev
= (void *) hdev
;
266 skb_queue_tail(&hdev
->cmd_q
, skb
);
267 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
269 skb_queue_purge(&hdev
->driver_init
);
271 switch (hdev
->dev_type
) {
281 BT_ERR("Unknown device type %d", hdev
->dev_type
);
287 static void hci_le_init_req(struct hci_dev
*hdev
, unsigned long opt
)
289 BT_DBG("%s", hdev
->name
);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
295 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
299 BT_DBG("%s %x", hdev
->name
, scan
);
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
305 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
309 BT_DBG("%s %x", hdev
->name
, auth
);
312 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
315 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
319 BT_DBG("%s %x", hdev
->name
, encrypt
);
322 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
325 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
327 __le16 policy
= cpu_to_le16(opt
);
329 BT_DBG("%s %x", hdev
->name
, policy
);
331 /* Default link policy */
332 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev
*hci_dev_get(int index
)
339 struct hci_dev
*hdev
= NULL
, *d
;
346 read_lock(&hci_dev_list_lock
);
347 list_for_each_entry(d
, &hci_dev_list
, list
) {
348 if (d
->id
== index
) {
349 hdev
= hci_dev_hold(d
);
353 read_unlock(&hci_dev_list_lock
);
357 /* ---- Inquiry support ---- */
358 static void inquiry_cache_flush(struct hci_dev
*hdev
)
360 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
361 struct inquiry_entry
*p
, *n
;
363 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
368 INIT_LIST_HEAD(&cache
->unknown
);
369 INIT_LIST_HEAD(&cache
->resolve
);
372 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
374 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
375 struct inquiry_entry
*e
;
377 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
379 list_for_each_entry(e
, &cache
->all
, all
) {
380 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
387 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
390 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
391 struct inquiry_entry
*e
;
393 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
395 list_for_each_entry(e
, &cache
->unknown
, list
) {
396 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
403 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
406 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
407 struct inquiry_entry
*ie
;
409 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
411 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
415 /* Entry not in the cache. Add new one. */
416 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
420 list_add(&ie
->all
, &cache
->all
);
423 ie
->name_state
= NAME_KNOWN
;
425 ie
->name_state
= NAME_NOT_KNOWN
;
426 list_add(&ie
->list
, &cache
->unknown
);
430 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
431 ie
->name_state
!= NAME_PENDING
) {
432 ie
->name_state
= NAME_KNOWN
;
436 memcpy(&ie
->data
, data
, sizeof(*data
));
437 ie
->timestamp
= jiffies
;
438 cache
->timestamp
= jiffies
;
440 if (ie
->name_state
== NAME_NOT_KNOWN
)
446 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
448 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
449 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
450 struct inquiry_entry
*e
;
453 list_for_each_entry(e
, &cache
->all
, all
) {
454 struct inquiry_data
*data
= &e
->data
;
459 bacpy(&info
->bdaddr
, &data
->bdaddr
);
460 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
461 info
->pscan_period_mode
= data
->pscan_period_mode
;
462 info
->pscan_mode
= data
->pscan_mode
;
463 memcpy(info
->dev_class
, data
->dev_class
, 3);
464 info
->clock_offset
= data
->clock_offset
;
470 BT_DBG("cache %p, copied %d", cache
, copied
);
474 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
476 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
477 struct hci_cp_inquiry cp
;
479 BT_DBG("%s", hdev
->name
);
481 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
485 memcpy(&cp
.lap
, &ir
->lap
, 3);
486 cp
.length
= ir
->length
;
487 cp
.num_rsp
= ir
->num_rsp
;
488 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
491 int hci_inquiry(void __user
*arg
)
493 __u8 __user
*ptr
= arg
;
494 struct hci_inquiry_req ir
;
495 struct hci_dev
*hdev
;
496 int err
= 0, do_inquiry
= 0, max_rsp
;
500 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
503 hdev
= hci_dev_get(ir
.dev_id
);
508 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
509 inquiry_cache_empty(hdev
) ||
510 ir
.flags
& IREQ_CACHE_FLUSH
) {
511 inquiry_cache_flush(hdev
);
514 hci_dev_unlock(hdev
);
516 timeo
= ir
.length
* msecs_to_jiffies(2000);
519 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
524 /* for unlimited number of responses we will use buffer with 255 entries */
525 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
527 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
528 * copy it to the user space.
530 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
537 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
538 hci_dev_unlock(hdev
);
540 BT_DBG("num_rsp %d", ir
.num_rsp
);
542 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
544 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
557 /* ---- HCI ioctl helpers ---- */
559 int hci_dev_open(__u16 dev
)
561 struct hci_dev
*hdev
;
564 hdev
= hci_dev_get(dev
);
568 BT_DBG("%s %p", hdev
->name
, hdev
);
572 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
577 if (test_bit(HCI_UP
, &hdev
->flags
)) {
582 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
583 set_bit(HCI_RAW
, &hdev
->flags
);
585 /* Treat all non BR/EDR controllers as raw devices if
586 enable_hs is not set */
587 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
588 set_bit(HCI_RAW
, &hdev
->flags
);
590 if (hdev
->open(hdev
)) {
595 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
596 atomic_set(&hdev
->cmd_cnt
, 1);
597 set_bit(HCI_INIT
, &hdev
->flags
);
598 hdev
->init_last_cmd
= 0;
600 ret
= __hci_request(hdev
, hci_init_req
, 0,
601 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
603 if (lmp_host_le_capable(hdev
))
604 ret
= __hci_request(hdev
, hci_le_init_req
, 0,
605 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
607 clear_bit(HCI_INIT
, &hdev
->flags
);
612 set_bit(HCI_UP
, &hdev
->flags
);
613 hci_notify(hdev
, HCI_DEV_UP
);
614 if (!test_bit(HCI_SETUP
, &hdev
->flags
)) {
616 mgmt_powered(hdev
, 1);
617 hci_dev_unlock(hdev
);
620 /* Init failed, cleanup */
621 flush_work(&hdev
->tx_work
);
622 flush_work(&hdev
->cmd_work
);
623 flush_work(&hdev
->rx_work
);
625 skb_queue_purge(&hdev
->cmd_q
);
626 skb_queue_purge(&hdev
->rx_q
);
631 if (hdev
->sent_cmd
) {
632 kfree_skb(hdev
->sent_cmd
);
633 hdev
->sent_cmd
= NULL
;
641 hci_req_unlock(hdev
);
646 static int hci_dev_do_close(struct hci_dev
*hdev
)
648 BT_DBG("%s %p", hdev
->name
, hdev
);
650 hci_req_cancel(hdev
, ENODEV
);
653 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
654 del_timer_sync(&hdev
->cmd_timer
);
655 hci_req_unlock(hdev
);
659 /* Flush RX and TX works */
660 flush_work(&hdev
->tx_work
);
661 flush_work(&hdev
->rx_work
);
663 if (hdev
->discov_timeout
> 0) {
664 cancel_delayed_work(&hdev
->discov_off
);
665 hdev
->discov_timeout
= 0;
668 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->flags
))
669 cancel_delayed_work(&hdev
->power_off
);
671 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->flags
))
672 cancel_delayed_work(&hdev
->service_cache
);
675 inquiry_cache_flush(hdev
);
676 hci_conn_hash_flush(hdev
);
677 hci_dev_unlock(hdev
);
679 hci_notify(hdev
, HCI_DEV_DOWN
);
685 skb_queue_purge(&hdev
->cmd_q
);
686 atomic_set(&hdev
->cmd_cnt
, 1);
687 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
688 set_bit(HCI_INIT
, &hdev
->flags
);
689 __hci_request(hdev
, hci_reset_req
, 0,
690 msecs_to_jiffies(250));
691 clear_bit(HCI_INIT
, &hdev
->flags
);
695 flush_work(&hdev
->cmd_work
);
698 skb_queue_purge(&hdev
->rx_q
);
699 skb_queue_purge(&hdev
->cmd_q
);
700 skb_queue_purge(&hdev
->raw_q
);
702 /* Drop last sent command */
703 if (hdev
->sent_cmd
) {
704 del_timer_sync(&hdev
->cmd_timer
);
705 kfree_skb(hdev
->sent_cmd
);
706 hdev
->sent_cmd
= NULL
;
709 /* After this point our queues are empty
710 * and no tasks are scheduled. */
714 mgmt_powered(hdev
, 0);
715 hci_dev_unlock(hdev
);
720 hci_req_unlock(hdev
);
726 int hci_dev_close(__u16 dev
)
728 struct hci_dev
*hdev
;
731 hdev
= hci_dev_get(dev
);
734 err
= hci_dev_do_close(hdev
);
739 int hci_dev_reset(__u16 dev
)
741 struct hci_dev
*hdev
;
744 hdev
= hci_dev_get(dev
);
750 if (!test_bit(HCI_UP
, &hdev
->flags
))
754 skb_queue_purge(&hdev
->rx_q
);
755 skb_queue_purge(&hdev
->cmd_q
);
758 inquiry_cache_flush(hdev
);
759 hci_conn_hash_flush(hdev
);
760 hci_dev_unlock(hdev
);
765 atomic_set(&hdev
->cmd_cnt
, 1);
766 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
768 if (!test_bit(HCI_RAW
, &hdev
->flags
))
769 ret
= __hci_request(hdev
, hci_reset_req
, 0,
770 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
773 hci_req_unlock(hdev
);
778 int hci_dev_reset_stat(__u16 dev
)
780 struct hci_dev
*hdev
;
783 hdev
= hci_dev_get(dev
);
787 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
794 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
796 struct hci_dev
*hdev
;
797 struct hci_dev_req dr
;
800 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
803 hdev
= hci_dev_get(dr
.dev_id
);
809 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
810 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
814 if (!lmp_encrypt_capable(hdev
)) {
819 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
820 /* Auth must be enabled first */
821 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
822 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
827 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
828 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
832 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
833 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
837 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
838 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
842 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
843 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
847 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
851 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
852 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
856 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
857 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
869 int hci_get_dev_list(void __user
*arg
)
871 struct hci_dev
*hdev
;
872 struct hci_dev_list_req
*dl
;
873 struct hci_dev_req
*dr
;
874 int n
= 0, size
, err
;
877 if (get_user(dev_num
, (__u16 __user
*) arg
))
880 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
883 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
885 dl
= kzalloc(size
, GFP_KERNEL
);
891 read_lock(&hci_dev_list_lock
);
892 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
893 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->flags
))
894 cancel_delayed_work(&hdev
->power_off
);
896 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
897 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
899 (dr
+ n
)->dev_id
= hdev
->id
;
900 (dr
+ n
)->dev_opt
= hdev
->flags
;
905 read_unlock(&hci_dev_list_lock
);
908 size
= sizeof(*dl
) + n
* sizeof(*dr
);
910 err
= copy_to_user(arg
, dl
, size
);
913 return err
? -EFAULT
: 0;
916 int hci_get_dev_info(void __user
*arg
)
918 struct hci_dev
*hdev
;
919 struct hci_dev_info di
;
922 if (copy_from_user(&di
, arg
, sizeof(di
)))
925 hdev
= hci_dev_get(di
.dev_id
);
929 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->flags
))
930 cancel_delayed_work_sync(&hdev
->power_off
);
932 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
933 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
935 strcpy(di
.name
, hdev
->name
);
936 di
.bdaddr
= hdev
->bdaddr
;
937 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
938 di
.flags
= hdev
->flags
;
939 di
.pkt_type
= hdev
->pkt_type
;
940 di
.acl_mtu
= hdev
->acl_mtu
;
941 di
.acl_pkts
= hdev
->acl_pkts
;
942 di
.sco_mtu
= hdev
->sco_mtu
;
943 di
.sco_pkts
= hdev
->sco_pkts
;
944 di
.link_policy
= hdev
->link_policy
;
945 di
.link_mode
= hdev
->link_mode
;
947 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
948 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
950 if (copy_to_user(arg
, &di
, sizeof(di
)))
958 /* ---- Interface to HCI drivers ---- */
960 static int hci_rfkill_set_block(void *data
, bool blocked
)
962 struct hci_dev
*hdev
= data
;
964 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
969 hci_dev_do_close(hdev
);
974 static const struct rfkill_ops hci_rfkill_ops
= {
975 .set_block
= hci_rfkill_set_block
,
978 /* Alloc HCI device */
979 struct hci_dev
*hci_alloc_dev(void)
981 struct hci_dev
*hdev
;
983 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
987 hci_init_sysfs(hdev
);
988 skb_queue_head_init(&hdev
->driver_init
);
992 EXPORT_SYMBOL(hci_alloc_dev
);
994 /* Free HCI device */
995 void hci_free_dev(struct hci_dev
*hdev
)
997 skb_queue_purge(&hdev
->driver_init
);
999 /* will free via device release */
1000 put_device(&hdev
->dev
);
1002 EXPORT_SYMBOL(hci_free_dev
);
1004 static void hci_power_on(struct work_struct
*work
)
1006 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1008 BT_DBG("%s", hdev
->name
);
1010 if (hci_dev_open(hdev
->id
) < 0)
1013 if (test_bit(HCI_AUTO_OFF
, &hdev
->flags
))
1014 schedule_delayed_work(&hdev
->power_off
,
1015 msecs_to_jiffies(AUTO_OFF_TIMEOUT
));
1017 if (test_and_clear_bit(HCI_SETUP
, &hdev
->flags
))
1018 mgmt_index_added(hdev
);
1021 static void hci_power_off(struct work_struct
*work
)
1023 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1026 BT_DBG("%s", hdev
->name
);
1028 clear_bit(HCI_AUTO_OFF
, &hdev
->flags
);
1030 hci_dev_close(hdev
->id
);
1033 static void hci_discov_off(struct work_struct
*work
)
1035 struct hci_dev
*hdev
;
1036 u8 scan
= SCAN_PAGE
;
1038 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1040 BT_DBG("%s", hdev
->name
);
1044 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1046 hdev
->discov_timeout
= 0;
1048 hci_dev_unlock(hdev
);
1051 int hci_uuids_clear(struct hci_dev
*hdev
)
1053 struct list_head
*p
, *n
;
1055 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1056 struct bt_uuid
*uuid
;
1058 uuid
= list_entry(p
, struct bt_uuid
, list
);
1067 int hci_link_keys_clear(struct hci_dev
*hdev
)
1069 struct list_head
*p
, *n
;
1071 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1072 struct link_key
*key
;
1074 key
= list_entry(p
, struct link_key
, list
);
1083 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1087 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1088 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1094 static int hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1095 u8 key_type
, u8 old_key_type
)
1098 if (key_type
< 0x03)
1101 /* Debug keys are insecure so don't store them persistently */
1102 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1105 /* Changed combination key and there's no previous one */
1106 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1109 /* Security mode 3 case */
1113 /* Neither local nor remote side had no-bonding as requirement */
1114 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1117 /* Local side had dedicated bonding as requirement */
1118 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1121 /* Remote side had dedicated bonding as requirement */
1122 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1125 /* If none of the above criteria match, then don't store the key
1130 struct link_key
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1134 list_for_each_entry(k
, &hdev
->link_keys
, list
) {
1135 struct key_master_id
*id
;
1137 if (k
->type
!= HCI_LK_SMP_LTK
)
1140 if (k
->dlen
!= sizeof(*id
))
1143 id
= (void *) &k
->data
;
1144 if (id
->ediv
== ediv
&&
1145 (memcmp(rand
, id
->rand
, sizeof(id
->rand
)) == 0))
1151 EXPORT_SYMBOL(hci_find_ltk
);
1153 struct link_key
*hci_find_link_key_type(struct hci_dev
*hdev
,
1154 bdaddr_t
*bdaddr
, u8 type
)
1158 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1159 if (k
->type
== type
&& bacmp(bdaddr
, &k
->bdaddr
) == 0)
1164 EXPORT_SYMBOL(hci_find_link_key_type
);
1166 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1167 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1169 struct link_key
*key
, *old_key
;
1170 u8 old_key_type
, persistent
;
1172 old_key
= hci_find_link_key(hdev
, bdaddr
);
1174 old_key_type
= old_key
->type
;
1177 old_key_type
= conn
? conn
->key_type
: 0xff;
1178 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1181 list_add(&key
->list
, &hdev
->link_keys
);
1184 BT_DBG("%s key for %s type %u", hdev
->name
, batostr(bdaddr
), type
);
1186 /* Some buggy controller combinations generate a changed
1187 * combination key for legacy pairing even when there's no
1189 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1190 (!conn
|| conn
->remote_auth
== 0xff) &&
1191 old_key_type
== 0xff) {
1192 type
= HCI_LK_COMBINATION
;
1194 conn
->key_type
= type
;
1197 bacpy(&key
->bdaddr
, bdaddr
);
1198 memcpy(key
->val
, val
, 16);
1199 key
->pin_len
= pin_len
;
1201 if (type
== HCI_LK_CHANGED_COMBINATION
)
1202 key
->type
= old_key_type
;
1209 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1211 mgmt_new_link_key(hdev
, key
, persistent
);
1214 list_del(&key
->list
);
1221 int hci_add_ltk(struct hci_dev
*hdev
, int new_key
, bdaddr_t
*bdaddr
,
1222 u8 key_size
, __le16 ediv
, u8 rand
[8], u8 ltk
[16])
1224 struct link_key
*key
, *old_key
;
1225 struct key_master_id
*id
;
1228 BT_DBG("%s addr %s", hdev
->name
, batostr(bdaddr
));
1230 old_key
= hci_find_link_key_type(hdev
, bdaddr
, HCI_LK_SMP_LTK
);
1233 old_key_type
= old_key
->type
;
1235 key
= kzalloc(sizeof(*key
) + sizeof(*id
), GFP_ATOMIC
);
1238 list_add(&key
->list
, &hdev
->link_keys
);
1239 old_key_type
= 0xff;
1242 key
->dlen
= sizeof(*id
);
1244 bacpy(&key
->bdaddr
, bdaddr
);
1245 memcpy(key
->val
, ltk
, sizeof(key
->val
));
1246 key
->type
= HCI_LK_SMP_LTK
;
1247 key
->pin_len
= key_size
;
1249 id
= (void *) &key
->data
;
1251 memcpy(id
->rand
, rand
, sizeof(id
->rand
));
1254 mgmt_new_link_key(hdev
, key
, old_key_type
);
1259 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1261 struct link_key
*key
;
1263 key
= hci_find_link_key(hdev
, bdaddr
);
1267 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1269 list_del(&key
->list
);
1275 /* HCI command timer function */
1276 static void hci_cmd_timer(unsigned long arg
)
1278 struct hci_dev
*hdev
= (void *) arg
;
1280 BT_ERR("%s command tx timeout", hdev
->name
);
1281 atomic_set(&hdev
->cmd_cnt
, 1);
1282 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1285 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1288 struct oob_data
*data
;
1290 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1291 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1297 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1299 struct oob_data
*data
;
1301 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1305 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1307 list_del(&data
->list
);
1313 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1315 struct oob_data
*data
, *n
;
1317 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1318 list_del(&data
->list
);
1325 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1328 struct oob_data
*data
;
1330 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1333 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1337 bacpy(&data
->bdaddr
, bdaddr
);
1338 list_add(&data
->list
, &hdev
->remote_oob_data
);
1341 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1342 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1344 BT_DBG("%s for %s", hdev
->name
, batostr(bdaddr
));
1349 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
,
1352 struct bdaddr_list
*b
;
1354 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1355 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1361 int hci_blacklist_clear(struct hci_dev
*hdev
)
1363 struct list_head
*p
, *n
;
1365 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1366 struct bdaddr_list
*b
;
1368 b
= list_entry(p
, struct bdaddr_list
, list
);
1377 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1379 struct bdaddr_list
*entry
;
1381 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1384 if (hci_blacklist_lookup(hdev
, bdaddr
))
1387 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1391 bacpy(&entry
->bdaddr
, bdaddr
);
1393 list_add(&entry
->list
, &hdev
->blacklist
);
1395 return mgmt_device_blocked(hdev
, bdaddr
);
1398 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1400 struct bdaddr_list
*entry
;
1402 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1403 return hci_blacklist_clear(hdev
);
1405 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1409 list_del(&entry
->list
);
1412 return mgmt_device_unblocked(hdev
, bdaddr
);
1415 static void hci_clear_adv_cache(struct work_struct
*work
)
1417 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1422 hci_adv_entries_clear(hdev
);
1424 hci_dev_unlock(hdev
);
1427 int hci_adv_entries_clear(struct hci_dev
*hdev
)
1429 struct adv_entry
*entry
, *tmp
;
1431 list_for_each_entry_safe(entry
, tmp
, &hdev
->adv_entries
, list
) {
1432 list_del(&entry
->list
);
1436 BT_DBG("%s adv cache cleared", hdev
->name
);
1441 struct adv_entry
*hci_find_adv_entry(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1443 struct adv_entry
*entry
;
1445 list_for_each_entry(entry
, &hdev
->adv_entries
, list
)
1446 if (bacmp(bdaddr
, &entry
->bdaddr
) == 0)
1452 static inline int is_connectable_adv(u8 evt_type
)
1454 if (evt_type
== ADV_IND
|| evt_type
== ADV_DIRECT_IND
)
1460 int hci_add_adv_entry(struct hci_dev
*hdev
,
1461 struct hci_ev_le_advertising_info
*ev
)
1463 struct adv_entry
*entry
;
1465 if (!is_connectable_adv(ev
->evt_type
))
1468 /* Only new entries should be added to adv_entries. So, if
1469 * bdaddr was found, don't add it. */
1470 if (hci_find_adv_entry(hdev
, &ev
->bdaddr
))
1473 entry
= kzalloc(sizeof(*entry
), GFP_ATOMIC
);
1477 bacpy(&entry
->bdaddr
, &ev
->bdaddr
);
1478 entry
->bdaddr_type
= ev
->bdaddr_type
;
1480 list_add(&entry
->list
, &hdev
->adv_entries
);
1482 BT_DBG("%s adv entry added: address %s type %u", hdev
->name
,
1483 batostr(&entry
->bdaddr
), entry
->bdaddr_type
);
1488 /* Register HCI device */
1489 int hci_register_dev(struct hci_dev
*hdev
)
1491 struct list_head
*head
= &hci_dev_list
, *p
;
1494 BT_DBG("%p name %s bus %d owner %p", hdev
, hdev
->name
,
1495 hdev
->bus
, hdev
->owner
);
1497 if (!hdev
->open
|| !hdev
->close
|| !hdev
->destruct
)
1500 /* Do not allow HCI_AMP devices to register at index 0,
1501 * so the index can be used as the AMP controller ID.
1503 id
= (hdev
->dev_type
== HCI_BREDR
) ? 0 : 1;
1505 write_lock(&hci_dev_list_lock
);
1507 /* Find first available device id */
1508 list_for_each(p
, &hci_dev_list
) {
1509 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
1514 sprintf(hdev
->name
, "hci%d", id
);
1516 list_add_tail(&hdev
->list
, head
);
1518 atomic_set(&hdev
->refcnt
, 1);
1519 mutex_init(&hdev
->lock
);
1522 hdev
->dev_flags
= 0;
1523 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1524 hdev
->esco_type
= (ESCO_HV1
);
1525 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1526 hdev
->io_capability
= 0x03; /* No Input No Output */
1528 hdev
->idle_timeout
= 0;
1529 hdev
->sniff_max_interval
= 800;
1530 hdev
->sniff_min_interval
= 80;
1532 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1533 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1534 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1537 skb_queue_head_init(&hdev
->rx_q
);
1538 skb_queue_head_init(&hdev
->cmd_q
);
1539 skb_queue_head_init(&hdev
->raw_q
);
1541 setup_timer(&hdev
->cmd_timer
, hci_cmd_timer
, (unsigned long) hdev
);
1543 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1544 hdev
->reassembly
[i
] = NULL
;
1546 init_waitqueue_head(&hdev
->req_wait_q
);
1547 mutex_init(&hdev
->req_lock
);
1549 inquiry_cache_init(hdev
);
1551 hci_conn_hash_init(hdev
);
1553 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1555 INIT_LIST_HEAD(&hdev
->blacklist
);
1557 INIT_LIST_HEAD(&hdev
->uuids
);
1559 INIT_LIST_HEAD(&hdev
->link_keys
);
1561 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1563 INIT_LIST_HEAD(&hdev
->adv_entries
);
1565 INIT_DELAYED_WORK(&hdev
->adv_work
, hci_clear_adv_cache
);
1566 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1567 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1569 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1571 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1573 atomic_set(&hdev
->promisc
, 0);
1575 write_unlock(&hci_dev_list_lock
);
1577 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1579 if (!hdev
->workqueue
) {
1584 error
= hci_add_sysfs(hdev
);
1588 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1589 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
1591 if (rfkill_register(hdev
->rfkill
) < 0) {
1592 rfkill_destroy(hdev
->rfkill
);
1593 hdev
->rfkill
= NULL
;
1597 set_bit(HCI_AUTO_OFF
, &hdev
->flags
);
1598 set_bit(HCI_SETUP
, &hdev
->flags
);
1599 schedule_work(&hdev
->power_on
);
1601 hci_notify(hdev
, HCI_DEV_REG
);
1606 destroy_workqueue(hdev
->workqueue
);
1608 write_lock(&hci_dev_list_lock
);
1609 list_del(&hdev
->list
);
1610 write_unlock(&hci_dev_list_lock
);
1614 EXPORT_SYMBOL(hci_register_dev
);
1616 /* Unregister HCI device */
1617 void hci_unregister_dev(struct hci_dev
*hdev
)
1621 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1623 write_lock(&hci_dev_list_lock
);
1624 list_del(&hdev
->list
);
1625 write_unlock(&hci_dev_list_lock
);
1627 hci_dev_do_close(hdev
);
1629 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1630 kfree_skb(hdev
->reassembly
[i
]);
1632 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1633 !test_bit(HCI_SETUP
, &hdev
->flags
)) {
1635 mgmt_index_removed(hdev
);
1636 hci_dev_unlock(hdev
);
1639 /* mgmt_index_removed should take care of emptying the
1641 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1643 hci_notify(hdev
, HCI_DEV_UNREG
);
1646 rfkill_unregister(hdev
->rfkill
);
1647 rfkill_destroy(hdev
->rfkill
);
1650 hci_del_sysfs(hdev
);
1652 cancel_delayed_work_sync(&hdev
->adv_work
);
1654 destroy_workqueue(hdev
->workqueue
);
1657 hci_blacklist_clear(hdev
);
1658 hci_uuids_clear(hdev
);
1659 hci_link_keys_clear(hdev
);
1660 hci_remote_oob_data_clear(hdev
);
1661 hci_adv_entries_clear(hdev
);
1662 hci_dev_unlock(hdev
);
1664 __hci_dev_put(hdev
);
1666 EXPORT_SYMBOL(hci_unregister_dev
);
1668 /* Suspend HCI device */
1669 int hci_suspend_dev(struct hci_dev
*hdev
)
1671 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1674 EXPORT_SYMBOL(hci_suspend_dev
);
1676 /* Resume HCI device */
1677 int hci_resume_dev(struct hci_dev
*hdev
)
1679 hci_notify(hdev
, HCI_DEV_RESUME
);
1682 EXPORT_SYMBOL(hci_resume_dev
);
1684 /* Receive frame from HCI drivers */
1685 int hci_recv_frame(struct sk_buff
*skb
)
1687 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1688 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1689 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1695 bt_cb(skb
)->incoming
= 1;
1698 __net_timestamp(skb
);
1700 skb_queue_tail(&hdev
->rx_q
, skb
);
1701 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1705 EXPORT_SYMBOL(hci_recv_frame
);
1707 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1708 int count
, __u8 index
)
1713 struct sk_buff
*skb
;
1714 struct bt_skb_cb
*scb
;
1716 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1717 index
>= NUM_REASSEMBLY
)
1720 skb
= hdev
->reassembly
[index
];
1724 case HCI_ACLDATA_PKT
:
1725 len
= HCI_MAX_FRAME_SIZE
;
1726 hlen
= HCI_ACL_HDR_SIZE
;
1729 len
= HCI_MAX_EVENT_SIZE
;
1730 hlen
= HCI_EVENT_HDR_SIZE
;
1732 case HCI_SCODATA_PKT
:
1733 len
= HCI_MAX_SCO_SIZE
;
1734 hlen
= HCI_SCO_HDR_SIZE
;
1738 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1742 scb
= (void *) skb
->cb
;
1744 scb
->pkt_type
= type
;
1746 skb
->dev
= (void *) hdev
;
1747 hdev
->reassembly
[index
] = skb
;
1751 scb
= (void *) skb
->cb
;
1752 len
= min(scb
->expect
, (__u16
)count
);
1754 memcpy(skb_put(skb
, len
), data
, len
);
1763 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1764 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1765 scb
->expect
= h
->plen
;
1767 if (skb_tailroom(skb
) < scb
->expect
) {
1769 hdev
->reassembly
[index
] = NULL
;
1775 case HCI_ACLDATA_PKT
:
1776 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1777 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1778 scb
->expect
= __le16_to_cpu(h
->dlen
);
1780 if (skb_tailroom(skb
) < scb
->expect
) {
1782 hdev
->reassembly
[index
] = NULL
;
1788 case HCI_SCODATA_PKT
:
1789 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1790 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1791 scb
->expect
= h
->dlen
;
1793 if (skb_tailroom(skb
) < scb
->expect
) {
1795 hdev
->reassembly
[index
] = NULL
;
1802 if (scb
->expect
== 0) {
1803 /* Complete frame */
1805 bt_cb(skb
)->pkt_type
= type
;
1806 hci_recv_frame(skb
);
1808 hdev
->reassembly
[index
] = NULL
;
1816 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1820 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1824 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
1828 data
+= (count
- rem
);
1834 EXPORT_SYMBOL(hci_recv_fragment
);
1836 #define STREAM_REASSEMBLY 0
1838 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1844 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1847 struct { char type
; } *pkt
;
1849 /* Start of the frame */
1856 type
= bt_cb(skb
)->pkt_type
;
1858 rem
= hci_reassembly(hdev
, type
, data
, count
,
1863 data
+= (count
- rem
);
1869 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1871 /* ---- Interface to upper protocols ---- */
1873 int hci_register_cb(struct hci_cb
*cb
)
1875 BT_DBG("%p name %s", cb
, cb
->name
);
1877 write_lock(&hci_cb_list_lock
);
1878 list_add(&cb
->list
, &hci_cb_list
);
1879 write_unlock(&hci_cb_list_lock
);
1883 EXPORT_SYMBOL(hci_register_cb
);
1885 int hci_unregister_cb(struct hci_cb
*cb
)
1887 BT_DBG("%p name %s", cb
, cb
->name
);
1889 write_lock(&hci_cb_list_lock
);
1890 list_del(&cb
->list
);
1891 write_unlock(&hci_cb_list_lock
);
1895 EXPORT_SYMBOL(hci_unregister_cb
);
1897 static int hci_send_frame(struct sk_buff
*skb
)
1899 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1906 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
1908 if (atomic_read(&hdev
->promisc
)) {
1910 __net_timestamp(skb
);
1912 hci_send_to_sock(hdev
, skb
, NULL
);
1915 /* Get rid of skb owner, prior to sending to the driver. */
1918 return hdev
->send(skb
);
1921 /* Send HCI command */
1922 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
1924 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
1925 struct hci_command_hdr
*hdr
;
1926 struct sk_buff
*skb
;
1928 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
1930 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1932 BT_ERR("%s no memory for command", hdev
->name
);
1936 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
1937 hdr
->opcode
= cpu_to_le16(opcode
);
1941 memcpy(skb_put(skb
, plen
), param
, plen
);
1943 BT_DBG("skb len %d", skb
->len
);
1945 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
1946 skb
->dev
= (void *) hdev
;
1948 if (test_bit(HCI_INIT
, &hdev
->flags
))
1949 hdev
->init_last_cmd
= opcode
;
1951 skb_queue_tail(&hdev
->cmd_q
, skb
);
1952 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1957 /* Get data from the previously sent command */
1958 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
1960 struct hci_command_hdr
*hdr
;
1962 if (!hdev
->sent_cmd
)
1965 hdr
= (void *) hdev
->sent_cmd
->data
;
1967 if (hdr
->opcode
!= cpu_to_le16(opcode
))
1970 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
1972 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
1976 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
1978 struct hci_acl_hdr
*hdr
;
1981 skb_push(skb
, HCI_ACL_HDR_SIZE
);
1982 skb_reset_transport_header(skb
);
1983 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
1984 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
1985 hdr
->dlen
= cpu_to_le16(len
);
1988 static void hci_queue_acl(struct hci_conn
*conn
, struct sk_buff_head
*queue
,
1989 struct sk_buff
*skb
, __u16 flags
)
1991 struct hci_dev
*hdev
= conn
->hdev
;
1992 struct sk_buff
*list
;
1994 list
= skb_shinfo(skb
)->frag_list
;
1996 /* Non fragmented */
1997 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
1999 skb_queue_tail(queue
, skb
);
2002 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2004 skb_shinfo(skb
)->frag_list
= NULL
;
2006 /* Queue all fragments atomically */
2007 spin_lock(&queue
->lock
);
2009 __skb_queue_tail(queue
, skb
);
2011 flags
&= ~ACL_START
;
2014 skb
= list
; list
= list
->next
;
2016 skb
->dev
= (void *) hdev
;
2017 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2018 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2020 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2022 __skb_queue_tail(queue
, skb
);
2025 spin_unlock(&queue
->lock
);
2029 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2031 struct hci_conn
*conn
= chan
->conn
;
2032 struct hci_dev
*hdev
= conn
->hdev
;
2034 BT_DBG("%s chan %p flags 0x%x", hdev
->name
, chan
, flags
);
2036 skb
->dev
= (void *) hdev
;
2037 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2038 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2040 hci_queue_acl(conn
, &chan
->data_q
, skb
, flags
);
2042 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2044 EXPORT_SYMBOL(hci_send_acl
);
2047 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2049 struct hci_dev
*hdev
= conn
->hdev
;
2050 struct hci_sco_hdr hdr
;
2052 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2054 hdr
.handle
= cpu_to_le16(conn
->handle
);
2055 hdr
.dlen
= skb
->len
;
2057 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2058 skb_reset_transport_header(skb
);
2059 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2061 skb
->dev
= (void *) hdev
;
2062 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2064 skb_queue_tail(&conn
->data_q
, skb
);
2065 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2067 EXPORT_SYMBOL(hci_send_sco
);
2069 /* ---- HCI TX task (outgoing data) ---- */
2071 /* HCI Connection scheduler */
2072 static inline struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
, int *quote
)
2074 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2075 struct hci_conn
*conn
= NULL
, *c
;
2076 int num
= 0, min
= ~0;
2078 /* We don't have to lock device here. Connections are always
2079 * added and removed with TX task disabled. */
2083 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2084 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2087 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2092 if (c
->sent
< min
) {
2097 if (hci_conn_num(hdev
, type
) == num
)
2106 switch (conn
->type
) {
2108 cnt
= hdev
->acl_cnt
;
2112 cnt
= hdev
->sco_cnt
;
2115 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2119 BT_ERR("Unknown link type");
2127 BT_DBG("conn %p quote %d", conn
, *quote
);
2131 static inline void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2133 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2136 BT_ERR("%s link tx timeout", hdev
->name
);
2140 /* Kill stalled connections */
2141 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2142 if (c
->type
== type
&& c
->sent
) {
2143 BT_ERR("%s killing stalled connection %s",
2144 hdev
->name
, batostr(&c
->dst
));
2145 hci_acl_disconn(c
, 0x13);
2152 static inline struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2155 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2156 struct hci_chan
*chan
= NULL
;
2157 int num
= 0, min
= ~0, cur_prio
= 0;
2158 struct hci_conn
*conn
;
2159 int cnt
, q
, conn_num
= 0;
2161 BT_DBG("%s", hdev
->name
);
2165 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2166 struct hci_chan
*tmp
;
2168 if (conn
->type
!= type
)
2171 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2176 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2177 struct sk_buff
*skb
;
2179 if (skb_queue_empty(&tmp
->data_q
))
2182 skb
= skb_peek(&tmp
->data_q
);
2183 if (skb
->priority
< cur_prio
)
2186 if (skb
->priority
> cur_prio
) {
2189 cur_prio
= skb
->priority
;
2194 if (conn
->sent
< min
) {
2200 if (hci_conn_num(hdev
, type
) == conn_num
)
2209 switch (chan
->conn
->type
) {
2211 cnt
= hdev
->acl_cnt
;
2215 cnt
= hdev
->sco_cnt
;
2218 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2222 BT_ERR("Unknown link type");
2227 BT_DBG("chan %p quote %d", chan
, *quote
);
2231 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2233 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2234 struct hci_conn
*conn
;
2237 BT_DBG("%s", hdev
->name
);
2241 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2242 struct hci_chan
*chan
;
2244 if (conn
->type
!= type
)
2247 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2252 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2253 struct sk_buff
*skb
;
2260 if (skb_queue_empty(&chan
->data_q
))
2263 skb
= skb_peek(&chan
->data_q
);
2264 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2267 skb
->priority
= HCI_PRIO_MAX
- 1;
2269 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2273 if (hci_conn_num(hdev
, type
) == num
)
2281 static inline void hci_sched_acl(struct hci_dev
*hdev
)
2283 struct hci_chan
*chan
;
2284 struct sk_buff
*skb
;
2288 BT_DBG("%s", hdev
->name
);
2290 if (!hci_conn_num(hdev
, ACL_LINK
))
2293 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2294 /* ACL tx timeout must be longer than maximum
2295 * link supervision timeout (40.9 seconds) */
2296 if (!hdev
->acl_cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+ HZ
* 45))
2297 hci_link_tx_to(hdev
, ACL_LINK
);
2300 cnt
= hdev
->acl_cnt
;
2302 while (hdev
->acl_cnt
&&
2303 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2304 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2305 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2306 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2307 skb
->len
, skb
->priority
);
2309 /* Stop if priority has changed */
2310 if (skb
->priority
< priority
)
2313 skb
= skb_dequeue(&chan
->data_q
);
2315 hci_conn_enter_active_mode(chan
->conn
,
2316 bt_cb(skb
)->force_active
);
2318 hci_send_frame(skb
);
2319 hdev
->acl_last_tx
= jiffies
;
2327 if (cnt
!= hdev
->acl_cnt
)
2328 hci_prio_recalculate(hdev
, ACL_LINK
);
2332 static inline void hci_sched_sco(struct hci_dev
*hdev
)
2334 struct hci_conn
*conn
;
2335 struct sk_buff
*skb
;
2338 BT_DBG("%s", hdev
->name
);
2340 if (!hci_conn_num(hdev
, SCO_LINK
))
2343 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2344 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2345 BT_DBG("skb %p len %d", skb
, skb
->len
);
2346 hci_send_frame(skb
);
2349 if (conn
->sent
== ~0)
2355 static inline void hci_sched_esco(struct hci_dev
*hdev
)
2357 struct hci_conn
*conn
;
2358 struct sk_buff
*skb
;
2361 BT_DBG("%s", hdev
->name
);
2363 if (!hci_conn_num(hdev
, ESCO_LINK
))
2366 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
, "e
))) {
2367 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2368 BT_DBG("skb %p len %d", skb
, skb
->len
);
2369 hci_send_frame(skb
);
2372 if (conn
->sent
== ~0)
2378 static inline void hci_sched_le(struct hci_dev
*hdev
)
2380 struct hci_chan
*chan
;
2381 struct sk_buff
*skb
;
2382 int quote
, cnt
, tmp
;
2384 BT_DBG("%s", hdev
->name
);
2386 if (!hci_conn_num(hdev
, LE_LINK
))
2389 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2390 /* LE tx timeout must be longer than maximum
2391 * link supervision timeout (40.9 seconds) */
2392 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2393 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2394 hci_link_tx_to(hdev
, LE_LINK
);
2397 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2399 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2400 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2401 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2402 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2403 skb
->len
, skb
->priority
);
2405 /* Stop if priority has changed */
2406 if (skb
->priority
< priority
)
2409 skb
= skb_dequeue(&chan
->data_q
);
2411 hci_send_frame(skb
);
2412 hdev
->le_last_tx
= jiffies
;
2423 hdev
->acl_cnt
= cnt
;
2426 hci_prio_recalculate(hdev
, LE_LINK
);
2429 static void hci_tx_work(struct work_struct
*work
)
2431 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2432 struct sk_buff
*skb
;
2434 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2435 hdev
->sco_cnt
, hdev
->le_cnt
);
2437 /* Schedule queues and send stuff to HCI driver */
2439 hci_sched_acl(hdev
);
2441 hci_sched_sco(hdev
);
2443 hci_sched_esco(hdev
);
2447 /* Send next queued raw (unknown type) packet */
2448 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2449 hci_send_frame(skb
);
2452 /* ----- HCI RX task (incoming data processing) ----- */
2454 /* ACL data packet */
2455 static inline void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2457 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2458 struct hci_conn
*conn
;
2459 __u16 handle
, flags
;
2461 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2463 handle
= __le16_to_cpu(hdr
->handle
);
2464 flags
= hci_flags(handle
);
2465 handle
= hci_handle(handle
);
2467 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev
->name
, skb
->len
, handle
, flags
);
2469 hdev
->stat
.acl_rx
++;
2472 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2473 hci_dev_unlock(hdev
);
2476 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2478 /* Send to upper protocol */
2479 l2cap_recv_acldata(conn
, skb
, flags
);
2482 BT_ERR("%s ACL packet for unknown connection handle %d",
2483 hdev
->name
, handle
);
2489 /* SCO data packet */
2490 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2492 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2493 struct hci_conn
*conn
;
2496 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2498 handle
= __le16_to_cpu(hdr
->handle
);
2500 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
2502 hdev
->stat
.sco_rx
++;
2505 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2506 hci_dev_unlock(hdev
);
2509 /* Send to upper protocol */
2510 sco_recv_scodata(conn
, skb
);
2513 BT_ERR("%s SCO packet for unknown connection handle %d",
2514 hdev
->name
, handle
);
2520 static void hci_rx_work(struct work_struct
*work
)
2522 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2523 struct sk_buff
*skb
;
2525 BT_DBG("%s", hdev
->name
);
2527 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2528 if (atomic_read(&hdev
->promisc
)) {
2529 /* Send copy to the sockets */
2530 hci_send_to_sock(hdev
, skb
, NULL
);
2533 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2538 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2539 /* Don't process data packets in this states. */
2540 switch (bt_cb(skb
)->pkt_type
) {
2541 case HCI_ACLDATA_PKT
:
2542 case HCI_SCODATA_PKT
:
2549 switch (bt_cb(skb
)->pkt_type
) {
2551 BT_DBG("%s Event packet", hdev
->name
);
2552 hci_event_packet(hdev
, skb
);
2555 case HCI_ACLDATA_PKT
:
2556 BT_DBG("%s ACL data packet", hdev
->name
);
2557 hci_acldata_packet(hdev
, skb
);
2560 case HCI_SCODATA_PKT
:
2561 BT_DBG("%s SCO data packet", hdev
->name
);
2562 hci_scodata_packet(hdev
, skb
);
2572 static void hci_cmd_work(struct work_struct
*work
)
2574 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2575 struct sk_buff
*skb
;
2577 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
2579 /* Send queued commands */
2580 if (atomic_read(&hdev
->cmd_cnt
)) {
2581 skb
= skb_dequeue(&hdev
->cmd_q
);
2585 kfree_skb(hdev
->sent_cmd
);
2587 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2588 if (hdev
->sent_cmd
) {
2589 atomic_dec(&hdev
->cmd_cnt
);
2590 hci_send_frame(skb
);
2591 if (test_bit(HCI_RESET
, &hdev
->flags
))
2592 del_timer(&hdev
->cmd_timer
);
2594 mod_timer(&hdev
->cmd_timer
,
2595 jiffies
+ msecs_to_jiffies(HCI_CMD_TIMEOUT
));
2597 skb_queue_head(&hdev
->cmd_q
, skb
);
2598 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2603 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2605 /* General inquiry access code (GIAC) */
2606 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2607 struct hci_cp_inquiry cp
;
2609 BT_DBG("%s", hdev
->name
);
2611 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2612 return -EINPROGRESS
;
2614 memset(&cp
, 0, sizeof(cp
));
2615 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2618 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2621 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2623 BT_DBG("%s", hdev
->name
);
2625 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2628 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
/* Module parameter (0644: root-writable, world-readable via sysfs)
 * toggling High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");