/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000

static bool enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
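
/*
 * Illustrative sketch (not part of the upstream file): a request callback
 * only issues HCI commands; hci_request() then sleeps on req_wait_q until
 * hci_req_complete() is invoked from the event path, or the timeout fires.
 * The callback and values below are hypothetical, mirroring hci_auth_req()
 * defined later in this file:
 *
 *	static void my_auth_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 auth = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
 *	}
 *
 *	err = hci_request(hdev, my_auth_req, 1,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */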
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	if (discov->state == DISCOVERY_INQUIRY ||
			discov->state == DISCOVERY_RESOLVING)
		return true;

	return false;
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
							bdaddr_t *bdaddr,
							int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie)
		goto update;

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
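
/*
 * Example of the update semantics above (illustrative scenario): a first
 * inquiry result for a device with an unresolved name creates an entry on
 * both cache->all and cache->unknown; once hci_inquiry_cache_update() runs
 * again with name_known == true, the entry leaves the unknown list and its
 * name_state becomes NAME_KNOWN. The return value tells the caller whether
 * the name is resolved (false means a remote name request is still needed).
 */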
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
)
798 struct hci_dev
*hdev
;
801 hdev
= hci_dev_get(dev
);
807 if (!test_bit(HCI_UP
, &hdev
->flags
))
811 skb_queue_purge(&hdev
->rx_q
);
812 skb_queue_purge(&hdev
->cmd_q
);
815 inquiry_cache_flush(hdev
);
816 hci_conn_hash_flush(hdev
);
817 hci_dev_unlock(hdev
);
822 atomic_set(&hdev
->cmd_cnt
, 1);
823 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
825 if (!test_bit(HCI_RAW
, &hdev
->flags
))
826 ret
= __hci_request(hdev
, hci_reset_req
, 0,
827 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
830 hci_req_unlock(hdev
);
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
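
/*
 * Typical driver usage (hedged sketch, callback names hypothetical): a
 * transport driver allocates the device in its probe routine, fills in the
 * mandatory callbacks and frees it again if registration fails:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */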
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1191 list_for_each_entry(k
, &hdev
->link_keys
, list
) {
1192 struct key_master_id
*id
;
1194 if (k
->type
!= HCI_LK_SMP_LTK
)
1197 if (k
->dlen
!= sizeof(*id
))
1200 id
= (void *) &k
->data
;
1201 if (id
->ediv
== ediv
&&
1202 (memcmp(rand
, id
->rand
, sizeof(id
->rand
)) == 0))
1208 EXPORT_SYMBOL(hci_find_ltk
);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}
static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
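
/*
 * Note on the power-on handshake (summary of the code above): registration
 * marks the device HCI_AUTO_OFF and HCI_SETUP and schedules power_on. That
 * work item opens the device, which runs hci_init_req() under HCI_INIT;
 * unless a management socket takes over within AUTO_OFF_TIMEOUT (2000 msecs
 * above), the delayed power_off work closes the device again. The matching
 * teardown in a hypothetical driver disconnect path is:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */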
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
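
/*
 * Hedged usage sketch: a transport driver that receives packet-type-tagged
 * chunks can feed them to the reassembler as they arrive; buffer and len
 * below are hypothetical driver variables. For raw byte streams that carry
 * an inline packet-type octet (e.g. H4 UART transports), see
 * hci_recv_stream_fragment() below instead.
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buffer, len);
 *	if (err < 0)
 *		BT_ERR("frame reassembly failed (%d)", err);
 */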
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
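
/*
 * Illustrative sketch: an upper protocol registers an hci_cb to be notified
 * about connection-level events. The .name field and registration calls are
 * as above; the callback itself is hypothetical:
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */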
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
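
/*
 * Usage sketch (parameters illustrative): queue a Write Scan Enable command
 * enabling both page and inquiry scan; the command is sent asynchronously
 * from hci_cmd_work() and its completion is reported via the event path:
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */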
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
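
/*
 * Worked example of the header packing above (values illustrative): for
 * handle 0x002a with flags ACL_START (0x02), hci_handle_pack() yields
 * 0x202a, the 12-bit connection handle in the low bits and the 2-bit
 * packet boundary/broadcast flags in bits 12-15, which is then stored
 * little-endian in hdr->handle.
 */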
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
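
/*
 * Quota example for the scheduler above (illustrative numbers): with
 * hdev->acl_cnt == 8 free controller buffers and num == 3 ACL connections
 * holding queued data, the least-busy connection is picked and receives
 * *quote = 8 / 3 = 2 frames in this scheduling round; the quote never drops
 * below 1, so a connection cannot be starved completely.
 */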
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2547 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2549 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2550 struct hci_conn
*conn
;
2553 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2555 handle
= __le16_to_cpu(hdr
->handle
);
2557 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
2559 hdev
->stat
.sco_rx
++;
2562 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2563 hci_dev_unlock(hdev
);
2566 /* Send to upper protocol */
2567 sco_recv_scodata(conn
, skb
);
2570 BT_ERR("%s SCO packet for unknown connection handle %d",
2571 hdev
->name
, handle
);
2577 static void hci_rx_work(struct work_struct
*work
)
2579 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2580 struct sk_buff
*skb
;
2582 BT_DBG("%s", hdev
->name
);
2584 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2585 if (atomic_read(&hdev
->promisc
)) {
2586 /* Send copy to the sockets */
2587 hci_send_to_sock(hdev
, skb
, NULL
);
2590 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2595 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2596 /* Don't process data packets in this states. */
2597 switch (bt_cb(skb
)->pkt_type
) {
2598 case HCI_ACLDATA_PKT
:
2599 case HCI_SCODATA_PKT
:
2606 switch (bt_cb(skb
)->pkt_type
) {
2608 BT_DBG("%s Event packet", hdev
->name
);
2609 hci_event_packet(hdev
, skb
);
2612 case HCI_ACLDATA_PKT
:
2613 BT_DBG("%s ACL data packet", hdev
->name
);
2614 hci_acldata_packet(hdev
, skb
);
2617 case HCI_SCODATA_PKT
:
2618 BT_DBG("%s SCO data packet", hdev
->name
);
2619 hci_scodata_packet(hdev
, skb
);
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
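
/*
 * Hedged usage sketch: discovery is started roughly like this (the length
 * is in units of 1.28s, as defined for the Inquiry command), paired with
 * hci_cancel_inquiry() below to stop early:
 *
 *	err = hci_do_inquiry(hdev, 0x08);	(~10.24s inquiry)
 *	...
 *	err = hci_cancel_inquiry(hdev);
 */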
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");