/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
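/*
 * Usage sketch (illustrative, not part of the original file): another
 * kernel module can watch for device events such as HCI_DEV_REG and
 * HCI_DEV_UNREG by registering a notifier block here. The callback and
 * block names below are hypothetical.
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			pr_info("hci device %s registered\n", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_hci_nb = {
 *		.notifier_call = my_hci_event,
 *	};
 *
 *	hci_register_notifier(&my_hci_nb);
 */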
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			      void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
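/*
 * A request is a plain callback that fires one or more HCI commands;
 * __hci_request() then sleeps until hci_req_complete() is called from
 * the event path, or the timeout expires. A minimal sketch (the
 * callback below is hypothetical, shown only to illustrate the
 * pairing):
 *
 *	static void my_write_name_req(struct hci_dev *hdev,
 *				      unsigned long opt)
 *	{
 *		struct hci_cp_write_local_name cp;
 *
 *		memset(&cp, 0, sizeof(cp));
 *		memcpy(cp.name, (void *) opt, sizeof(cp.name));
 *		hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME,
 *			     sizeof(cp), &cp);
 *	}
 *
 *	err = hci_request(hdev, my_write_name_req, (unsigned long) name,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */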
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
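/*
 * Callers own a reference on the returned device and must drop it with
 * hci_dev_put() when done, e.g. (illustrative sketch):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */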
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hdev->discovery.type = 0;

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir,
				  timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
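/*
 * From user space this path is reached through the HCIINQUIRY ioctl on
 * a raw HCI socket. A minimal user-space sketch (illustrative only,
 * error handling omitted): the request struct is followed in memory by
 * the response buffer, exactly as hci_inquiry() expects.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(sk, HCIINQUIRY, &buf);
 */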
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			      u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
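/*
 * Worked example of the rules above (illustrative): for a dedicated
 * bonding pairing, conn->auth_type is 0x02 or 0x03, so the "dedicated
 * bonding" check fires and the key is stored persistently. For a pure
 * no-bonding SSP exchange, both conn->auth_type and conn->remote_auth
 * are 0x00 or 0x01, every check falls through, and the key is dropped
 * once the link goes down.
 */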
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) &&
	    old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16],
		u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
		      struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
	       batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
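/*
 * Typical driver usage (sketch modelled on transport drivers such as
 * btusb; the my_* callback names are hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */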
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
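/*
 * A transport driver hands completed packets to the core like this
 * (sketch; the skb must carry the packet type and the owning device):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */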
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
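/*
 * UART-style drivers that see a raw byte stream (packet-type byte
 * followed by the packet itself) can feed arbitrary chunks straight
 * from their receive path. A sketch modelled on line-discipline
 * drivers (the function name is hypothetical):
 *
 *	static void my_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, (void *) buf, len) < 0)
 *			BT_ERR("%s frame reassembly failed", hdev->name);
 *	}
 */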
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
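/*
 * Upper layers hook connection events through struct hci_cb. A sketch
 * (the callback name is hypothetical; only the fields a protocol
 * actually needs have to be set):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */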
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
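/*
 * Commands are built from the parameter structs in hci.h; for example,
 * putting the controller into page-scan mode (illustrative sketch):
 *
 *	__u8 scan = SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */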
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
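/*
 * Worked example of the header packing (illustrative):
 * hci_handle_pack(handle, flags) folds the 4 flag bits into the top of
 * the 12-bit connection handle, so handle 0x002a with ACL_START (0x02)
 * becomes 0x202a on the wire; hci_handle() and hci_flags() on the RX
 * side undo the split.
 */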
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
					    int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
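/*
 * Scheduling example (illustrative): with hdev->acl_cnt == 8 free TX
 * slots and three ACL connections that all have pending data, the
 * least-used connection wins and gets a quote of 8 / 3 == 2 packets
 * before the scheduler picks again, so one busy link cannot starve
 * the others.
 */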
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}

static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
					     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
		return NULL;
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
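/*
 * Block arithmetic example (illustrative): with hdev->block_len == 64,
 * a 300-byte ACL frame occupies DIV_ROUND_UP(300 - 4, 64) == 5 data
 * blocks, HCI_ACL_HDR_SIZE being the 4-byte handle/length header.
 */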
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");