2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the RX/TX/command work handlers defined
 * later in this file and wired up in hci_alloc_dev(). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device event (register/unregister, up/down, ...) to the
 * HCI socket layer so monitoring sockets see it. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev
->name
, cmd
, result
);
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
67 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
) {
68 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
69 u16 opcode
= __le16_to_cpu(sent
->opcode
);
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
79 if (cmd
!= HCI_OP_RESET
|| opcode
== HCI_OP_RESET
)
82 skb
= skb_clone(hdev
->sent_cmd
, GFP_ATOMIC
);
84 skb_queue_head(&hdev
->cmd_q
, skb
);
85 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
91 if (hdev
->req_status
== HCI_REQ_PEND
) {
92 hdev
->req_result
= result
;
93 hdev
->req_status
= HCI_REQ_DONE
;
94 wake_up_interruptible(&hdev
->req_wait_q
);
98 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
100 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
102 if (hdev
->req_status
== HCI_REQ_PEND
) {
103 hdev
->req_result
= err
;
104 hdev
->req_status
= HCI_REQ_CANCELED
;
105 wake_up_interruptible(&hdev
->req_wait_q
);
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev
*hdev
,
111 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
112 unsigned long opt
, __u32 timeout
)
114 DECLARE_WAITQUEUE(wait
, current
);
117 BT_DBG("%s start", hdev
->name
);
119 hdev
->req_status
= HCI_REQ_PEND
;
121 add_wait_queue(&hdev
->req_wait_q
, &wait
);
122 set_current_state(TASK_INTERRUPTIBLE
);
125 schedule_timeout(timeout
);
127 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
129 if (signal_pending(current
))
132 switch (hdev
->req_status
) {
134 err
= -bt_to_errno(hdev
->req_result
);
137 case HCI_REQ_CANCELED
:
138 err
= -hdev
->req_result
;
146 hdev
->req_status
= hdev
->req_result
= 0;
148 BT_DBG("%s end: err %d", hdev
->name
, err
);
153 static int hci_request(struct hci_dev
*hdev
,
154 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
155 unsigned long opt
, __u32 timeout
)
159 if (!test_bit(HCI_UP
, &hdev
->flags
))
162 /* Serialize all requests */
164 ret
= __hci_request(hdev
, req
, opt
, timeout
);
165 hci_req_unlock(hdev
);
170 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
172 BT_DBG("%s %ld", hdev
->name
, opt
);
175 set_bit(HCI_RESET
, &hdev
->flags
);
176 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
179 static void bredr_init(struct hci_dev
*hdev
)
181 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
183 /* Read Local Supported Features */
184 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
186 /* Read Local Version */
187 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
190 static void amp_init(struct hci_dev
*hdev
)
192 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
194 /* Read Local Version */
195 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
200 /* Read Data Blk size */
201 hci_send_cmd(hdev
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
204 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
208 BT_DBG("%s %ld", hdev
->name
, opt
);
210 /* Driver initialization */
212 /* Special commands */
213 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
214 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
215 skb
->dev
= (void *) hdev
;
217 skb_queue_tail(&hdev
->cmd_q
, skb
);
218 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
220 skb_queue_purge(&hdev
->driver_init
);
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
224 hci_reset_req(hdev
, 0);
226 switch (hdev
->dev_type
) {
236 BT_ERR("Unknown device type %d", hdev
->dev_type
);
241 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
245 BT_DBG("%s %x", hdev
->name
, scan
);
247 /* Inquiry and Page scans */
248 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
251 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
255 BT_DBG("%s %x", hdev
->name
, auth
);
258 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
261 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
265 BT_DBG("%s %x", hdev
->name
, encrypt
);
268 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
271 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
273 __le16 policy
= cpu_to_le16(opt
);
275 BT_DBG("%s %x", hdev
->name
, policy
);
277 /* Default link policy */
278 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
281 /* Get HCI device by index.
282 * Device is held on return. */
283 struct hci_dev
*hci_dev_get(int index
)
285 struct hci_dev
*hdev
= NULL
, *d
;
292 read_lock(&hci_dev_list_lock
);
293 list_for_each_entry(d
, &hci_dev_list
, list
) {
294 if (d
->id
== index
) {
295 hdev
= hci_dev_hold(d
);
299 read_unlock(&hci_dev_list_lock
);
303 /* ---- Inquiry support ---- */
305 bool hci_discovery_active(struct hci_dev
*hdev
)
307 struct discovery_state
*discov
= &hdev
->discovery
;
309 switch (discov
->state
) {
310 case DISCOVERY_FINDING
:
311 case DISCOVERY_RESOLVING
:
319 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
321 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
323 if (hdev
->discovery
.state
== state
)
327 case DISCOVERY_STOPPED
:
328 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
329 mgmt_discovering(hdev
, 0);
331 case DISCOVERY_STARTING
:
333 case DISCOVERY_FINDING
:
334 mgmt_discovering(hdev
, 1);
336 case DISCOVERY_RESOLVING
:
338 case DISCOVERY_STOPPING
:
342 hdev
->discovery
.state
= state
;
345 static void inquiry_cache_flush(struct hci_dev
*hdev
)
347 struct discovery_state
*cache
= &hdev
->discovery
;
348 struct inquiry_entry
*p
, *n
;
350 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
355 INIT_LIST_HEAD(&cache
->unknown
);
356 INIT_LIST_HEAD(&cache
->resolve
);
359 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
362 struct discovery_state
*cache
= &hdev
->discovery
;
363 struct inquiry_entry
*e
;
365 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
367 list_for_each_entry(e
, &cache
->all
, all
) {
368 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
375 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
378 struct discovery_state
*cache
= &hdev
->discovery
;
379 struct inquiry_entry
*e
;
381 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
383 list_for_each_entry(e
, &cache
->unknown
, list
) {
384 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
391 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
395 struct discovery_state
*cache
= &hdev
->discovery
;
396 struct inquiry_entry
*e
;
398 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
400 list_for_each_entry(e
, &cache
->resolve
, list
) {
401 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
403 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
410 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
411 struct inquiry_entry
*ie
)
413 struct discovery_state
*cache
= &hdev
->discovery
;
414 struct list_head
*pos
= &cache
->resolve
;
415 struct inquiry_entry
*p
;
419 list_for_each_entry(p
, &cache
->resolve
, list
) {
420 if (p
->name_state
!= NAME_PENDING
&&
421 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
426 list_add(&ie
->list
, pos
);
429 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
430 bool name_known
, bool *ssp
)
432 struct discovery_state
*cache
= &hdev
->discovery
;
433 struct inquiry_entry
*ie
;
435 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
438 *ssp
= data
->ssp_mode
;
440 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
442 if (ie
->data
.ssp_mode
&& ssp
)
445 if (ie
->name_state
== NAME_NEEDED
&&
446 data
->rssi
!= ie
->data
.rssi
) {
447 ie
->data
.rssi
= data
->rssi
;
448 hci_inquiry_cache_update_resolve(hdev
, ie
);
454 /* Entry not in the cache. Add new one. */
455 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
459 list_add(&ie
->all
, &cache
->all
);
462 ie
->name_state
= NAME_KNOWN
;
464 ie
->name_state
= NAME_NOT_KNOWN
;
465 list_add(&ie
->list
, &cache
->unknown
);
469 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
470 ie
->name_state
!= NAME_PENDING
) {
471 ie
->name_state
= NAME_KNOWN
;
475 memcpy(&ie
->data
, data
, sizeof(*data
));
476 ie
->timestamp
= jiffies
;
477 cache
->timestamp
= jiffies
;
479 if (ie
->name_state
== NAME_NOT_KNOWN
)
485 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
487 struct discovery_state
*cache
= &hdev
->discovery
;
488 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
489 struct inquiry_entry
*e
;
492 list_for_each_entry(e
, &cache
->all
, all
) {
493 struct inquiry_data
*data
= &e
->data
;
498 bacpy(&info
->bdaddr
, &data
->bdaddr
);
499 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
500 info
->pscan_period_mode
= data
->pscan_period_mode
;
501 info
->pscan_mode
= data
->pscan_mode
;
502 memcpy(info
->dev_class
, data
->dev_class
, 3);
503 info
->clock_offset
= data
->clock_offset
;
509 BT_DBG("cache %p, copied %d", cache
, copied
);
513 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
515 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
516 struct hci_cp_inquiry cp
;
518 BT_DBG("%s", hdev
->name
);
520 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
524 memcpy(&cp
.lap
, &ir
->lap
, 3);
525 cp
.length
= ir
->length
;
526 cp
.num_rsp
= ir
->num_rsp
;
527 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
530 int hci_inquiry(void __user
*arg
)
532 __u8 __user
*ptr
= arg
;
533 struct hci_inquiry_req ir
;
534 struct hci_dev
*hdev
;
535 int err
= 0, do_inquiry
= 0, max_rsp
;
539 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
542 hdev
= hci_dev_get(ir
.dev_id
);
547 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
548 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
549 inquiry_cache_flush(hdev
);
552 hci_dev_unlock(hdev
);
554 timeo
= ir
.length
* msecs_to_jiffies(2000);
557 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
562 /* for unlimited number of responses we will use buffer with
565 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
567 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
568 * copy it to the user space.
570 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
577 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
578 hci_dev_unlock(hdev
);
580 BT_DBG("num_rsp %d", ir
.num_rsp
);
582 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
584 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
597 static u8
create_ad(struct hci_dev
*hdev
, u8
*ptr
)
599 u8 ad_len
= 0, flags
= 0;
602 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
603 flags
|= LE_AD_GENERAL
;
605 if (!lmp_bredr_capable(hdev
))
606 flags
|= LE_AD_NO_BREDR
;
608 if (lmp_le_br_capable(hdev
))
609 flags
|= LE_AD_SIM_LE_BREDR_CTRL
;
611 if (lmp_host_le_br_capable(hdev
))
612 flags
|= LE_AD_SIM_LE_BREDR_HOST
;
615 BT_DBG("adv flags 0x%02x", flags
);
625 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
627 ptr
[1] = EIR_TX_POWER
;
628 ptr
[2] = (u8
) hdev
->adv_tx_power
;
634 name_len
= strlen(hdev
->dev_name
);
636 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
638 if (name_len
> max_len
) {
640 ptr
[1] = EIR_NAME_SHORT
;
642 ptr
[1] = EIR_NAME_COMPLETE
;
644 ptr
[0] = name_len
+ 1;
646 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
648 ad_len
+= (name_len
+ 2);
649 ptr
+= (name_len
+ 2);
655 int hci_update_ad(struct hci_dev
*hdev
)
657 struct hci_cp_le_set_adv_data cp
;
663 if (!lmp_le_capable(hdev
)) {
668 memset(&cp
, 0, sizeof(cp
));
670 len
= create_ad(hdev
, cp
.data
);
672 if (hdev
->adv_data_len
== len
&&
673 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0) {
678 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
679 hdev
->adv_data_len
= len
;
682 err
= hci_send_cmd(hdev
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
685 hci_dev_unlock(hdev
);
690 /* ---- HCI ioctl helpers ---- */
692 int hci_dev_open(__u16 dev
)
694 struct hci_dev
*hdev
;
697 hdev
= hci_dev_get(dev
);
701 BT_DBG("%s %p", hdev
->name
, hdev
);
705 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
710 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
715 if (test_bit(HCI_UP
, &hdev
->flags
)) {
720 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
721 set_bit(HCI_RAW
, &hdev
->flags
);
723 /* Treat all non BR/EDR controllers as raw devices if
724 enable_hs is not set */
725 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
726 set_bit(HCI_RAW
, &hdev
->flags
);
728 if (hdev
->open(hdev
)) {
733 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
734 atomic_set(&hdev
->cmd_cnt
, 1);
735 set_bit(HCI_INIT
, &hdev
->flags
);
736 hdev
->init_last_cmd
= 0;
738 ret
= __hci_request(hdev
, hci_init_req
, 0, HCI_INIT_TIMEOUT
);
740 clear_bit(HCI_INIT
, &hdev
->flags
);
745 set_bit(HCI_UP
, &hdev
->flags
);
746 hci_notify(hdev
, HCI_DEV_UP
);
748 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
749 mgmt_valid_hdev(hdev
)) {
751 mgmt_powered(hdev
, 1);
752 hci_dev_unlock(hdev
);
755 /* Init failed, cleanup */
756 flush_work(&hdev
->tx_work
);
757 flush_work(&hdev
->cmd_work
);
758 flush_work(&hdev
->rx_work
);
760 skb_queue_purge(&hdev
->cmd_q
);
761 skb_queue_purge(&hdev
->rx_q
);
766 if (hdev
->sent_cmd
) {
767 kfree_skb(hdev
->sent_cmd
);
768 hdev
->sent_cmd
= NULL
;
776 hci_req_unlock(hdev
);
781 static int hci_dev_do_close(struct hci_dev
*hdev
)
783 BT_DBG("%s %p", hdev
->name
, hdev
);
785 cancel_work_sync(&hdev
->le_scan
);
787 cancel_delayed_work(&hdev
->power_off
);
789 hci_req_cancel(hdev
, ENODEV
);
792 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
793 del_timer_sync(&hdev
->cmd_timer
);
794 hci_req_unlock(hdev
);
798 /* Flush RX and TX works */
799 flush_work(&hdev
->tx_work
);
800 flush_work(&hdev
->rx_work
);
802 if (hdev
->discov_timeout
> 0) {
803 cancel_delayed_work(&hdev
->discov_off
);
804 hdev
->discov_timeout
= 0;
805 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
808 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
809 cancel_delayed_work(&hdev
->service_cache
);
811 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
814 inquiry_cache_flush(hdev
);
815 hci_conn_hash_flush(hdev
);
816 hci_dev_unlock(hdev
);
818 hci_notify(hdev
, HCI_DEV_DOWN
);
824 skb_queue_purge(&hdev
->cmd_q
);
825 atomic_set(&hdev
->cmd_cnt
, 1);
826 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
827 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
828 set_bit(HCI_INIT
, &hdev
->flags
);
829 __hci_request(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
830 clear_bit(HCI_INIT
, &hdev
->flags
);
834 flush_work(&hdev
->cmd_work
);
837 skb_queue_purge(&hdev
->rx_q
);
838 skb_queue_purge(&hdev
->cmd_q
);
839 skb_queue_purge(&hdev
->raw_q
);
841 /* Drop last sent command */
842 if (hdev
->sent_cmd
) {
843 del_timer_sync(&hdev
->cmd_timer
);
844 kfree_skb(hdev
->sent_cmd
);
845 hdev
->sent_cmd
= NULL
;
848 /* After this point our queues are empty
849 * and no tasks are scheduled. */
852 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
853 mgmt_valid_hdev(hdev
)) {
855 mgmt_powered(hdev
, 0);
856 hci_dev_unlock(hdev
);
862 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
863 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
865 hci_req_unlock(hdev
);
871 int hci_dev_close(__u16 dev
)
873 struct hci_dev
*hdev
;
876 hdev
= hci_dev_get(dev
);
880 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
881 cancel_delayed_work(&hdev
->power_off
);
883 err
= hci_dev_do_close(hdev
);
889 int hci_dev_reset(__u16 dev
)
891 struct hci_dev
*hdev
;
894 hdev
= hci_dev_get(dev
);
900 if (!test_bit(HCI_UP
, &hdev
->flags
))
904 skb_queue_purge(&hdev
->rx_q
);
905 skb_queue_purge(&hdev
->cmd_q
);
908 inquiry_cache_flush(hdev
);
909 hci_conn_hash_flush(hdev
);
910 hci_dev_unlock(hdev
);
915 atomic_set(&hdev
->cmd_cnt
, 1);
916 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
918 if (!test_bit(HCI_RAW
, &hdev
->flags
))
919 ret
= __hci_request(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
922 hci_req_unlock(hdev
);
927 int hci_dev_reset_stat(__u16 dev
)
929 struct hci_dev
*hdev
;
932 hdev
= hci_dev_get(dev
);
936 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
943 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
945 struct hci_dev
*hdev
;
946 struct hci_dev_req dr
;
949 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
952 hdev
= hci_dev_get(dr
.dev_id
);
958 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
963 if (!lmp_encrypt_capable(hdev
)) {
968 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
969 /* Auth must be enabled first */
970 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
976 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
981 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
986 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
991 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
992 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
996 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1000 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1001 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1005 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1006 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1018 int hci_get_dev_list(void __user
*arg
)
1020 struct hci_dev
*hdev
;
1021 struct hci_dev_list_req
*dl
;
1022 struct hci_dev_req
*dr
;
1023 int n
= 0, size
, err
;
1026 if (get_user(dev_num
, (__u16 __user
*) arg
))
1029 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
1032 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
1034 dl
= kzalloc(size
, GFP_KERNEL
);
1040 read_lock(&hci_dev_list_lock
);
1041 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1042 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1043 cancel_delayed_work(&hdev
->power_off
);
1045 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1046 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1048 (dr
+ n
)->dev_id
= hdev
->id
;
1049 (dr
+ n
)->dev_opt
= hdev
->flags
;
1054 read_unlock(&hci_dev_list_lock
);
1057 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1059 err
= copy_to_user(arg
, dl
, size
);
1062 return err
? -EFAULT
: 0;
1065 int hci_get_dev_info(void __user
*arg
)
1067 struct hci_dev
*hdev
;
1068 struct hci_dev_info di
;
1071 if (copy_from_user(&di
, arg
, sizeof(di
)))
1074 hdev
= hci_dev_get(di
.dev_id
);
1078 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1079 cancel_delayed_work_sync(&hdev
->power_off
);
1081 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1082 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1084 strcpy(di
.name
, hdev
->name
);
1085 di
.bdaddr
= hdev
->bdaddr
;
1086 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1087 di
.flags
= hdev
->flags
;
1088 di
.pkt_type
= hdev
->pkt_type
;
1089 if (lmp_bredr_capable(hdev
)) {
1090 di
.acl_mtu
= hdev
->acl_mtu
;
1091 di
.acl_pkts
= hdev
->acl_pkts
;
1092 di
.sco_mtu
= hdev
->sco_mtu
;
1093 di
.sco_pkts
= hdev
->sco_pkts
;
1095 di
.acl_mtu
= hdev
->le_mtu
;
1096 di
.acl_pkts
= hdev
->le_pkts
;
1100 di
.link_policy
= hdev
->link_policy
;
1101 di
.link_mode
= hdev
->link_mode
;
1103 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1104 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1106 if (copy_to_user(arg
, &di
, sizeof(di
)))
1114 /* ---- Interface to HCI drivers ---- */
1116 static int hci_rfkill_set_block(void *data
, bool blocked
)
1118 struct hci_dev
*hdev
= data
;
1120 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1125 hci_dev_do_close(hdev
);
1130 static const struct rfkill_ops hci_rfkill_ops
= {
1131 .set_block
= hci_rfkill_set_block
,
1134 static void hci_power_on(struct work_struct
*work
)
1136 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1138 BT_DBG("%s", hdev
->name
);
1140 if (hci_dev_open(hdev
->id
) < 0)
1143 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1144 schedule_delayed_work(&hdev
->power_off
, HCI_AUTO_OFF_TIMEOUT
);
1146 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1147 mgmt_index_added(hdev
);
1150 static void hci_power_off(struct work_struct
*work
)
1152 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1155 BT_DBG("%s", hdev
->name
);
1157 hci_dev_do_close(hdev
);
1160 static void hci_discov_off(struct work_struct
*work
)
1162 struct hci_dev
*hdev
;
1163 u8 scan
= SCAN_PAGE
;
1165 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1167 BT_DBG("%s", hdev
->name
);
1171 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1173 hdev
->discov_timeout
= 0;
1175 hci_dev_unlock(hdev
);
1178 int hci_uuids_clear(struct hci_dev
*hdev
)
1180 struct list_head
*p
, *n
;
1182 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1183 struct bt_uuid
*uuid
;
1185 uuid
= list_entry(p
, struct bt_uuid
, list
);
1194 int hci_link_keys_clear(struct hci_dev
*hdev
)
1196 struct list_head
*p
, *n
;
1198 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1199 struct link_key
*key
;
1201 key
= list_entry(p
, struct link_key
, list
);
1210 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1212 struct smp_ltk
*k
, *tmp
;
1214 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1222 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1226 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1227 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1233 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1234 u8 key_type
, u8 old_key_type
)
1237 if (key_type
< 0x03)
1240 /* Debug keys are insecure so don't store them persistently */
1241 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1244 /* Changed combination key and there's no previous one */
1245 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1248 /* Security mode 3 case */
1252 /* Neither local nor remote side had no-bonding as requirement */
1253 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1256 /* Local side had dedicated bonding as requirement */
1257 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1260 /* Remote side had dedicated bonding as requirement */
1261 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1264 /* If none of the above criteria match, then don't store the key
1269 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1273 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1274 if (k
->ediv
!= ediv
||
1275 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1284 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1289 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1290 if (addr_type
== k
->bdaddr_type
&&
1291 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1297 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1298 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1300 struct link_key
*key
, *old_key
;
1304 old_key
= hci_find_link_key(hdev
, bdaddr
);
1306 old_key_type
= old_key
->type
;
1309 old_key_type
= conn
? conn
->key_type
: 0xff;
1310 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1313 list_add(&key
->list
, &hdev
->link_keys
);
1316 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1318 /* Some buggy controller combinations generate a changed
1319 * combination key for legacy pairing even when there's no
1321 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1322 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1323 type
= HCI_LK_COMBINATION
;
1325 conn
->key_type
= type
;
1328 bacpy(&key
->bdaddr
, bdaddr
);
1329 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1330 key
->pin_len
= pin_len
;
1332 if (type
== HCI_LK_CHANGED_COMBINATION
)
1333 key
->type
= old_key_type
;
1340 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1342 mgmt_new_link_key(hdev
, key
, persistent
);
1345 conn
->flush_key
= !persistent
;
1350 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1351 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1354 struct smp_ltk
*key
, *old_key
;
1356 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1359 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1363 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1366 list_add(&key
->list
, &hdev
->long_term_keys
);
1369 bacpy(&key
->bdaddr
, bdaddr
);
1370 key
->bdaddr_type
= addr_type
;
1371 memcpy(key
->val
, tk
, sizeof(key
->val
));
1372 key
->authenticated
= authenticated
;
1374 key
->enc_size
= enc_size
;
1376 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1381 if (type
& HCI_SMP_LTK
)
1382 mgmt_new_ltk(hdev
, key
, 1);
1387 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1389 struct link_key
*key
;
1391 key
= hci_find_link_key(hdev
, bdaddr
);
1395 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1397 list_del(&key
->list
);
1403 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1405 struct smp_ltk
*k
, *tmp
;
1407 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1408 if (bacmp(bdaddr
, &k
->bdaddr
))
1411 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1420 /* HCI command timer function */
1421 static void hci_cmd_timeout(unsigned long arg
)
1423 struct hci_dev
*hdev
= (void *) arg
;
1425 if (hdev
->sent_cmd
) {
1426 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
1427 u16 opcode
= __le16_to_cpu(sent
->opcode
);
1429 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
1431 BT_ERR("%s command tx timeout", hdev
->name
);
1434 atomic_set(&hdev
->cmd_cnt
, 1);
1435 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1438 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1441 struct oob_data
*data
;
1443 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1444 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1450 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1452 struct oob_data
*data
;
1454 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1458 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1460 list_del(&data
->list
);
1466 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1468 struct oob_data
*data
, *n
;
1470 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1471 list_del(&data
->list
);
1478 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1481 struct oob_data
*data
;
1483 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1486 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1490 bacpy(&data
->bdaddr
, bdaddr
);
1491 list_add(&data
->list
, &hdev
->remote_oob_data
);
1494 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1495 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1497 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1502 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1504 struct bdaddr_list
*b
;
1506 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1507 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1513 int hci_blacklist_clear(struct hci_dev
*hdev
)
1515 struct list_head
*p
, *n
;
1517 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1518 struct bdaddr_list
*b
;
1520 b
= list_entry(p
, struct bdaddr_list
, list
);
1529 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1531 struct bdaddr_list
*entry
;
1533 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1536 if (hci_blacklist_lookup(hdev
, bdaddr
))
1539 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1543 bacpy(&entry
->bdaddr
, bdaddr
);
1545 list_add(&entry
->list
, &hdev
->blacklist
);
1547 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1550 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1552 struct bdaddr_list
*entry
;
1554 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1555 return hci_blacklist_clear(hdev
);
1557 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1561 list_del(&entry
->list
);
1564 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1567 static void le_scan_param_req(struct hci_dev
*hdev
, unsigned long opt
)
1569 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1570 struct hci_cp_le_set_scan_param cp
;
1572 memset(&cp
, 0, sizeof(cp
));
1573 cp
.type
= param
->type
;
1574 cp
.interval
= cpu_to_le16(param
->interval
);
1575 cp
.window
= cpu_to_le16(param
->window
);
1577 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
1580 static void le_scan_enable_req(struct hci_dev
*hdev
, unsigned long opt
)
1582 struct hci_cp_le_set_scan_enable cp
;
1584 memset(&cp
, 0, sizeof(cp
));
1588 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1591 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1592 u16 window
, int timeout
)
1594 long timeo
= msecs_to_jiffies(3000);
1595 struct le_scan_params param
;
1598 BT_DBG("%s", hdev
->name
);
1600 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1601 return -EINPROGRESS
;
1604 param
.interval
= interval
;
1605 param
.window
= window
;
1609 err
= __hci_request(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1612 err
= __hci_request(hdev
, le_scan_enable_req
, 0, timeo
);
1614 hci_req_unlock(hdev
);
1619 schedule_delayed_work(&hdev
->le_scan_disable
,
1620 msecs_to_jiffies(timeout
));
1625 int hci_cancel_le_scan(struct hci_dev
*hdev
)
1627 BT_DBG("%s", hdev
->name
);
1629 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1632 if (cancel_delayed_work(&hdev
->le_scan_disable
)) {
1633 struct hci_cp_le_set_scan_enable cp
;
1635 /* Send HCI command to disable LE Scan */
1636 memset(&cp
, 0, sizeof(cp
));
1637 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1643 static void le_scan_disable_work(struct work_struct
*work
)
1645 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1646 le_scan_disable
.work
);
1647 struct hci_cp_le_set_scan_enable cp
;
1649 BT_DBG("%s", hdev
->name
);
1651 memset(&cp
, 0, sizeof(cp
));
1653 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1656 static void le_scan_work(struct work_struct
*work
)
1658 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1659 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1661 BT_DBG("%s", hdev
->name
);
1663 hci_do_le_scan(hdev
, param
->type
, param
->interval
, param
->window
,
1667 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1670 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1672 BT_DBG("%s", hdev
->name
);
1674 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
1677 if (work_busy(&hdev
->le_scan
))
1678 return -EINPROGRESS
;
1681 param
->interval
= interval
;
1682 param
->window
= window
;
1683 param
->timeout
= timeout
;
1685 queue_work(system_long_wq
, &hdev
->le_scan
);
1690 /* Alloc HCI device */
1691 struct hci_dev
*hci_alloc_dev(void)
1693 struct hci_dev
*hdev
;
1695 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1699 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1700 hdev
->esco_type
= (ESCO_HV1
);
1701 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1702 hdev
->io_capability
= 0x03; /* No Input No Output */
1703 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
1704 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
1706 hdev
->sniff_max_interval
= 800;
1707 hdev
->sniff_min_interval
= 80;
1709 mutex_init(&hdev
->lock
);
1710 mutex_init(&hdev
->req_lock
);
1712 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1713 INIT_LIST_HEAD(&hdev
->blacklist
);
1714 INIT_LIST_HEAD(&hdev
->uuids
);
1715 INIT_LIST_HEAD(&hdev
->link_keys
);
1716 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1717 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1718 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
1720 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1721 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1722 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1723 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1724 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
1726 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1727 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1728 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
1730 skb_queue_head_init(&hdev
->driver_init
);
1731 skb_queue_head_init(&hdev
->rx_q
);
1732 skb_queue_head_init(&hdev
->cmd_q
);
1733 skb_queue_head_init(&hdev
->raw_q
);
1735 init_waitqueue_head(&hdev
->req_wait_q
);
1737 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
1739 hci_init_sysfs(hdev
);
1740 discovery_init(hdev
);
1744 EXPORT_SYMBOL(hci_alloc_dev
);
1746 /* Free HCI device */
1747 void hci_free_dev(struct hci_dev
*hdev
)
1749 skb_queue_purge(&hdev
->driver_init
);
1751 /* will free via device release */
1752 put_device(&hdev
->dev
);
1754 EXPORT_SYMBOL(hci_free_dev
);
1756 /* Register HCI device */
1757 int hci_register_dev(struct hci_dev
*hdev
)
1761 if (!hdev
->open
|| !hdev
->close
)
1764 /* Do not allow HCI_AMP devices to register at index 0,
1765 * so the index can be used as the AMP controller ID.
1767 switch (hdev
->dev_type
) {
1769 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
1772 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
1781 sprintf(hdev
->name
, "hci%d", id
);
1784 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1786 write_lock(&hci_dev_list_lock
);
1787 list_add(&hdev
->list
, &hci_dev_list
);
1788 write_unlock(&hci_dev_list_lock
);
1790 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1792 if (!hdev
->workqueue
) {
1797 error
= hci_add_sysfs(hdev
);
1801 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1802 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
1805 if (rfkill_register(hdev
->rfkill
) < 0) {
1806 rfkill_destroy(hdev
->rfkill
);
1807 hdev
->rfkill
= NULL
;
1811 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
1813 if (hdev
->dev_type
!= HCI_AMP
)
1814 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1816 schedule_work(&hdev
->power_on
);
1818 hci_notify(hdev
, HCI_DEV_REG
);
1824 destroy_workqueue(hdev
->workqueue
);
1826 ida_simple_remove(&hci_index_ida
, hdev
->id
);
1827 write_lock(&hci_dev_list_lock
);
1828 list_del(&hdev
->list
);
1829 write_unlock(&hci_dev_list_lock
);
1833 EXPORT_SYMBOL(hci_register_dev
);
1835 /* Unregister HCI device */
1836 void hci_unregister_dev(struct hci_dev
*hdev
)
1840 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1842 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
1846 write_lock(&hci_dev_list_lock
);
1847 list_del(&hdev
->list
);
1848 write_unlock(&hci_dev_list_lock
);
1850 hci_dev_do_close(hdev
);
1852 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1853 kfree_skb(hdev
->reassembly
[i
]);
1855 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1856 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1858 mgmt_index_removed(hdev
);
1859 hci_dev_unlock(hdev
);
1862 /* mgmt_index_removed should take care of emptying the
1864 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1866 hci_notify(hdev
, HCI_DEV_UNREG
);
1869 rfkill_unregister(hdev
->rfkill
);
1870 rfkill_destroy(hdev
->rfkill
);
1873 hci_del_sysfs(hdev
);
1875 destroy_workqueue(hdev
->workqueue
);
1878 hci_blacklist_clear(hdev
);
1879 hci_uuids_clear(hdev
);
1880 hci_link_keys_clear(hdev
);
1881 hci_smp_ltks_clear(hdev
);
1882 hci_remote_oob_data_clear(hdev
);
1883 hci_dev_unlock(hdev
);
1887 ida_simple_remove(&hci_index_ida
, id
);
1889 EXPORT_SYMBOL(hci_unregister_dev
);
1891 /* Suspend HCI device */
1892 int hci_suspend_dev(struct hci_dev
*hdev
)
1894 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1897 EXPORT_SYMBOL(hci_suspend_dev
);
1899 /* Resume HCI device */
1900 int hci_resume_dev(struct hci_dev
*hdev
)
1902 hci_notify(hdev
, HCI_DEV_RESUME
);
1905 EXPORT_SYMBOL(hci_resume_dev
);
1907 /* Receive frame from HCI drivers */
1908 int hci_recv_frame(struct sk_buff
*skb
)
1910 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1911 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1912 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1918 bt_cb(skb
)->incoming
= 1;
1921 __net_timestamp(skb
);
1923 skb_queue_tail(&hdev
->rx_q
, skb
);
1924 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1928 EXPORT_SYMBOL(hci_recv_frame
);
1930 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1931 int count
, __u8 index
)
1936 struct sk_buff
*skb
;
1937 struct bt_skb_cb
*scb
;
1939 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1940 index
>= NUM_REASSEMBLY
)
1943 skb
= hdev
->reassembly
[index
];
1947 case HCI_ACLDATA_PKT
:
1948 len
= HCI_MAX_FRAME_SIZE
;
1949 hlen
= HCI_ACL_HDR_SIZE
;
1952 len
= HCI_MAX_EVENT_SIZE
;
1953 hlen
= HCI_EVENT_HDR_SIZE
;
1955 case HCI_SCODATA_PKT
:
1956 len
= HCI_MAX_SCO_SIZE
;
1957 hlen
= HCI_SCO_HDR_SIZE
;
1961 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1965 scb
= (void *) skb
->cb
;
1967 scb
->pkt_type
= type
;
1969 skb
->dev
= (void *) hdev
;
1970 hdev
->reassembly
[index
] = skb
;
1974 scb
= (void *) skb
->cb
;
1975 len
= min_t(uint
, scb
->expect
, count
);
1977 memcpy(skb_put(skb
, len
), data
, len
);
1986 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1987 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1988 scb
->expect
= h
->plen
;
1990 if (skb_tailroom(skb
) < scb
->expect
) {
1992 hdev
->reassembly
[index
] = NULL
;
1998 case HCI_ACLDATA_PKT
:
1999 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
2000 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
2001 scb
->expect
= __le16_to_cpu(h
->dlen
);
2003 if (skb_tailroom(skb
) < scb
->expect
) {
2005 hdev
->reassembly
[index
] = NULL
;
2011 case HCI_SCODATA_PKT
:
2012 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
2013 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
2014 scb
->expect
= h
->dlen
;
2016 if (skb_tailroom(skb
) < scb
->expect
) {
2018 hdev
->reassembly
[index
] = NULL
;
2025 if (scb
->expect
== 0) {
2026 /* Complete frame */
2028 bt_cb(skb
)->pkt_type
= type
;
2029 hci_recv_frame(skb
);
2031 hdev
->reassembly
[index
] = NULL
;
2039 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
2043 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
2047 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
2051 data
+= (count
- rem
);
2057 EXPORT_SYMBOL(hci_recv_fragment
);
2059 #define STREAM_REASSEMBLY 0
2061 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
2067 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
2070 struct { char type
; } *pkt
;
2072 /* Start of the frame */
2079 type
= bt_cb(skb
)->pkt_type
;
2081 rem
= hci_reassembly(hdev
, type
, data
, count
,
2086 data
+= (count
- rem
);
2092 EXPORT_SYMBOL(hci_recv_stream_fragment
);
2094 /* ---- Interface to upper protocols ---- */
2096 int hci_register_cb(struct hci_cb
*cb
)
2098 BT_DBG("%p name %s", cb
, cb
->name
);
2100 write_lock(&hci_cb_list_lock
);
2101 list_add(&cb
->list
, &hci_cb_list
);
2102 write_unlock(&hci_cb_list_lock
);
2106 EXPORT_SYMBOL(hci_register_cb
);
2108 int hci_unregister_cb(struct hci_cb
*cb
)
2110 BT_DBG("%p name %s", cb
, cb
->name
);
2112 write_lock(&hci_cb_list_lock
);
2113 list_del(&cb
->list
);
2114 write_unlock(&hci_cb_list_lock
);
2118 EXPORT_SYMBOL(hci_unregister_cb
);
2120 static int hci_send_frame(struct sk_buff
*skb
)
2122 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2129 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2132 __net_timestamp(skb
);
2134 /* Send copy to monitor */
2135 hci_send_to_monitor(hdev
, skb
);
2137 if (atomic_read(&hdev
->promisc
)) {
2138 /* Send copy to the sockets */
2139 hci_send_to_sock(hdev
, skb
);
2142 /* Get rid of skb owner, prior to sending to the driver. */
2145 return hdev
->send(skb
);
2148 /* Send HCI command */
2149 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2151 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2152 struct hci_command_hdr
*hdr
;
2153 struct sk_buff
*skb
;
2155 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2157 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2159 BT_ERR("%s no memory for command", hdev
->name
);
2163 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2164 hdr
->opcode
= cpu_to_le16(opcode
);
2168 memcpy(skb_put(skb
, plen
), param
, plen
);
2170 BT_DBG("skb len %d", skb
->len
);
2172 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2173 skb
->dev
= (void *) hdev
;
2175 if (test_bit(HCI_INIT
, &hdev
->flags
))
2176 hdev
->init_last_cmd
= opcode
;
2178 skb_queue_tail(&hdev
->cmd_q
, skb
);
2179 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2184 /* Get data from the previously sent command */
2185 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2187 struct hci_command_hdr
*hdr
;
2189 if (!hdev
->sent_cmd
)
2192 hdr
= (void *) hdev
->sent_cmd
->data
;
2194 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2197 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
2199 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2203 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2205 struct hci_acl_hdr
*hdr
;
2208 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2209 skb_reset_transport_header(skb
);
2210 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2211 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2212 hdr
->dlen
= cpu_to_le16(len
);
2215 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
2216 struct sk_buff
*skb
, __u16 flags
)
2218 struct hci_conn
*conn
= chan
->conn
;
2219 struct hci_dev
*hdev
= conn
->hdev
;
2220 struct sk_buff
*list
;
2222 skb
->len
= skb_headlen(skb
);
2225 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2227 switch (hdev
->dev_type
) {
2229 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2232 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
2235 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
2239 list
= skb_shinfo(skb
)->frag_list
;
2241 /* Non fragmented */
2242 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2244 skb_queue_tail(queue
, skb
);
2247 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2249 skb_shinfo(skb
)->frag_list
= NULL
;
2251 /* Queue all fragments atomically */
2252 spin_lock(&queue
->lock
);
2254 __skb_queue_tail(queue
, skb
);
2256 flags
&= ~ACL_START
;
2259 skb
= list
; list
= list
->next
;
2261 skb
->dev
= (void *) hdev
;
2262 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2263 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2265 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2267 __skb_queue_tail(queue
, skb
);
2270 spin_unlock(&queue
->lock
);
2274 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2276 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2278 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2280 skb
->dev
= (void *) hdev
;
2282 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2284 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2288 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2290 struct hci_dev
*hdev
= conn
->hdev
;
2291 struct hci_sco_hdr hdr
;
2293 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2295 hdr
.handle
= cpu_to_le16(conn
->handle
);
2296 hdr
.dlen
= skb
->len
;
2298 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2299 skb_reset_transport_header(skb
);
2300 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2302 skb
->dev
= (void *) hdev
;
2303 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2305 skb_queue_tail(&conn
->data_q
, skb
);
2306 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2309 /* ---- HCI TX task (outgoing data) ---- */
2311 /* HCI Connection scheduler */
2312 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
2315 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2316 struct hci_conn
*conn
= NULL
, *c
;
2317 unsigned int num
= 0, min
= ~0;
2319 /* We don't have to lock device here. Connections are always
2320 * added and removed with TX task disabled. */
2324 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2325 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2328 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2333 if (c
->sent
< min
) {
2338 if (hci_conn_num(hdev
, type
) == num
)
2347 switch (conn
->type
) {
2349 cnt
= hdev
->acl_cnt
;
2353 cnt
= hdev
->sco_cnt
;
2356 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2360 BT_ERR("Unknown link type");
2368 BT_DBG("conn %p quote %d", conn
, *quote
);
2372 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2374 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2377 BT_ERR("%s link tx timeout", hdev
->name
);
2381 /* Kill stalled connections */
2382 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2383 if (c
->type
== type
&& c
->sent
) {
2384 BT_ERR("%s killing stalled connection %pMR",
2385 hdev
->name
, &c
->dst
);
2386 hci_acl_disconn(c
, HCI_ERROR_REMOTE_USER_TERM
);
2393 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2396 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2397 struct hci_chan
*chan
= NULL
;
2398 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
2399 struct hci_conn
*conn
;
2400 int cnt
, q
, conn_num
= 0;
2402 BT_DBG("%s", hdev
->name
);
2406 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2407 struct hci_chan
*tmp
;
2409 if (conn
->type
!= type
)
2412 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2417 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2418 struct sk_buff
*skb
;
2420 if (skb_queue_empty(&tmp
->data_q
))
2423 skb
= skb_peek(&tmp
->data_q
);
2424 if (skb
->priority
< cur_prio
)
2427 if (skb
->priority
> cur_prio
) {
2430 cur_prio
= skb
->priority
;
2435 if (conn
->sent
< min
) {
2441 if (hci_conn_num(hdev
, type
) == conn_num
)
2450 switch (chan
->conn
->type
) {
2452 cnt
= hdev
->acl_cnt
;
2455 cnt
= hdev
->block_cnt
;
2459 cnt
= hdev
->sco_cnt
;
2462 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2466 BT_ERR("Unknown link type");
2471 BT_DBG("chan %p quote %d", chan
, *quote
);
2475 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2477 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2478 struct hci_conn
*conn
;
2481 BT_DBG("%s", hdev
->name
);
2485 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2486 struct hci_chan
*chan
;
2488 if (conn
->type
!= type
)
2491 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2496 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2497 struct sk_buff
*skb
;
2504 if (skb_queue_empty(&chan
->data_q
))
2507 skb
= skb_peek(&chan
->data_q
);
2508 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2511 skb
->priority
= HCI_PRIO_MAX
- 1;
2513 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2517 if (hci_conn_num(hdev
, type
) == num
)
2525 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2527 /* Calculate count of blocks used by this packet */
2528 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2531 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2533 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2534 /* ACL tx timeout must be longer than maximum
2535 * link supervision timeout (40.9 seconds) */
2536 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2537 HCI_ACL_TX_TIMEOUT
))
2538 hci_link_tx_to(hdev
, ACL_LINK
);
2542 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2544 unsigned int cnt
= hdev
->acl_cnt
;
2545 struct hci_chan
*chan
;
2546 struct sk_buff
*skb
;
2549 __check_timeout(hdev
, cnt
);
2551 while (hdev
->acl_cnt
&&
2552 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2553 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2554 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2555 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2556 skb
->len
, skb
->priority
);
2558 /* Stop if priority has changed */
2559 if (skb
->priority
< priority
)
2562 skb
= skb_dequeue(&chan
->data_q
);
2564 hci_conn_enter_active_mode(chan
->conn
,
2565 bt_cb(skb
)->force_active
);
2567 hci_send_frame(skb
);
2568 hdev
->acl_last_tx
= jiffies
;
2576 if (cnt
!= hdev
->acl_cnt
)
2577 hci_prio_recalculate(hdev
, ACL_LINK
);
2580 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
2582 unsigned int cnt
= hdev
->block_cnt
;
2583 struct hci_chan
*chan
;
2584 struct sk_buff
*skb
;
2588 __check_timeout(hdev
, cnt
);
2590 BT_DBG("%s", hdev
->name
);
2592 if (hdev
->dev_type
== HCI_AMP
)
2597 while (hdev
->block_cnt
> 0 &&
2598 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
2599 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2600 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2603 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2604 skb
->len
, skb
->priority
);
2606 /* Stop if priority has changed */
2607 if (skb
->priority
< priority
)
2610 skb
= skb_dequeue(&chan
->data_q
);
2612 blocks
= __get_blocks(hdev
, skb
);
2613 if (blocks
> hdev
->block_cnt
)
2616 hci_conn_enter_active_mode(chan
->conn
,
2617 bt_cb(skb
)->force_active
);
2619 hci_send_frame(skb
);
2620 hdev
->acl_last_tx
= jiffies
;
2622 hdev
->block_cnt
-= blocks
;
2625 chan
->sent
+= blocks
;
2626 chan
->conn
->sent
+= blocks
;
2630 if (cnt
!= hdev
->block_cnt
)
2631 hci_prio_recalculate(hdev
, type
);
2634 static void hci_sched_acl(struct hci_dev
*hdev
)
2636 BT_DBG("%s", hdev
->name
);
2638 /* No ACL link over BR/EDR controller */
2639 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
2642 /* No AMP link over AMP controller */
2643 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
2646 switch (hdev
->flow_ctl_mode
) {
2647 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
2648 hci_sched_acl_pkt(hdev
);
2651 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
2652 hci_sched_acl_blk(hdev
);
2658 static void hci_sched_sco(struct hci_dev
*hdev
)
2660 struct hci_conn
*conn
;
2661 struct sk_buff
*skb
;
2664 BT_DBG("%s", hdev
->name
);
2666 if (!hci_conn_num(hdev
, SCO_LINK
))
2669 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2670 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2671 BT_DBG("skb %p len %d", skb
, skb
->len
);
2672 hci_send_frame(skb
);
2675 if (conn
->sent
== ~0)
2681 static void hci_sched_esco(struct hci_dev
*hdev
)
2683 struct hci_conn
*conn
;
2684 struct sk_buff
*skb
;
2687 BT_DBG("%s", hdev
->name
);
2689 if (!hci_conn_num(hdev
, ESCO_LINK
))
2692 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
2694 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2695 BT_DBG("skb %p len %d", skb
, skb
->len
);
2696 hci_send_frame(skb
);
2699 if (conn
->sent
== ~0)
2705 static void hci_sched_le(struct hci_dev
*hdev
)
2707 struct hci_chan
*chan
;
2708 struct sk_buff
*skb
;
2709 int quote
, cnt
, tmp
;
2711 BT_DBG("%s", hdev
->name
);
2713 if (!hci_conn_num(hdev
, LE_LINK
))
2716 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2717 /* LE tx timeout must be longer than maximum
2718 * link supervision timeout (40.9 seconds) */
2719 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2720 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2721 hci_link_tx_to(hdev
, LE_LINK
);
2724 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2726 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2727 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2728 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2729 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2730 skb
->len
, skb
->priority
);
2732 /* Stop if priority has changed */
2733 if (skb
->priority
< priority
)
2736 skb
= skb_dequeue(&chan
->data_q
);
2738 hci_send_frame(skb
);
2739 hdev
->le_last_tx
= jiffies
;
2750 hdev
->acl_cnt
= cnt
;
2753 hci_prio_recalculate(hdev
, LE_LINK
);
2756 static void hci_tx_work(struct work_struct
*work
)
2758 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2759 struct sk_buff
*skb
;
2761 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2762 hdev
->sco_cnt
, hdev
->le_cnt
);
2764 /* Schedule queues and send stuff to HCI driver */
2766 hci_sched_acl(hdev
);
2768 hci_sched_sco(hdev
);
2770 hci_sched_esco(hdev
);
2774 /* Send next queued raw (unknown type) packet */
2775 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2776 hci_send_frame(skb
);
2779 /* ----- HCI RX task (incoming data processing) ----- */
2781 /* ACL data packet */
2782 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2784 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2785 struct hci_conn
*conn
;
2786 __u16 handle
, flags
;
2788 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2790 handle
= __le16_to_cpu(hdr
->handle
);
2791 flags
= hci_flags(handle
);
2792 handle
= hci_handle(handle
);
2794 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
2797 hdev
->stat
.acl_rx
++;
2800 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2801 hci_dev_unlock(hdev
);
2804 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2807 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
2808 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
2809 mgmt_device_connected(hdev
, &conn
->dst
, conn
->type
,
2810 conn
->dst_type
, 0, NULL
, 0,
2812 hci_dev_unlock(hdev
);
2814 /* Send to upper protocol */
2815 l2cap_recv_acldata(conn
, skb
, flags
);
2818 BT_ERR("%s ACL packet for unknown connection handle %d",
2819 hdev
->name
, handle
);
2825 /* SCO data packet */
2826 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2828 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2829 struct hci_conn
*conn
;
2832 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2834 handle
= __le16_to_cpu(hdr
->handle
);
2836 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
2838 hdev
->stat
.sco_rx
++;
2841 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2842 hci_dev_unlock(hdev
);
2845 /* Send to upper protocol */
2846 sco_recv_scodata(conn
, skb
);
2849 BT_ERR("%s SCO packet for unknown connection handle %d",
2850 hdev
->name
, handle
);
2856 static void hci_rx_work(struct work_struct
*work
)
2858 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2859 struct sk_buff
*skb
;
2861 BT_DBG("%s", hdev
->name
);
2863 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2864 /* Send copy to monitor */
2865 hci_send_to_monitor(hdev
, skb
);
2867 if (atomic_read(&hdev
->promisc
)) {
2868 /* Send copy to the sockets */
2869 hci_send_to_sock(hdev
, skb
);
2872 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2877 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2878 /* Don't process data packets in this states. */
2879 switch (bt_cb(skb
)->pkt_type
) {
2880 case HCI_ACLDATA_PKT
:
2881 case HCI_SCODATA_PKT
:
2888 switch (bt_cb(skb
)->pkt_type
) {
2890 BT_DBG("%s Event packet", hdev
->name
);
2891 hci_event_packet(hdev
, skb
);
2894 case HCI_ACLDATA_PKT
:
2895 BT_DBG("%s ACL data packet", hdev
->name
);
2896 hci_acldata_packet(hdev
, skb
);
2899 case HCI_SCODATA_PKT
:
2900 BT_DBG("%s SCO data packet", hdev
->name
);
2901 hci_scodata_packet(hdev
, skb
);
2911 static void hci_cmd_work(struct work_struct
*work
)
2913 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2914 struct sk_buff
*skb
;
2916 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
2917 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
2919 /* Send queued commands */
2920 if (atomic_read(&hdev
->cmd_cnt
)) {
2921 skb
= skb_dequeue(&hdev
->cmd_q
);
2925 kfree_skb(hdev
->sent_cmd
);
2927 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2928 if (hdev
->sent_cmd
) {
2929 atomic_dec(&hdev
->cmd_cnt
);
2930 hci_send_frame(skb
);
2931 if (test_bit(HCI_RESET
, &hdev
->flags
))
2932 del_timer(&hdev
->cmd_timer
);
2934 mod_timer(&hdev
->cmd_timer
,
2935 jiffies
+ HCI_CMD_TIMEOUT
);
2937 skb_queue_head(&hdev
->cmd_q
, skb
);
2938 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2943 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2945 /* General inquiry access code (GIAC) */
2946 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2947 struct hci_cp_inquiry cp
;
2949 BT_DBG("%s", hdev
->name
);
2951 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2952 return -EINPROGRESS
;
2954 inquiry_cache_flush(hdev
);
2956 memset(&cp
, 0, sizeof(cp
));
2957 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2960 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2963 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2965 BT_DBG("%s", hdev
->name
);
2967 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2970 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
2973 u8
bdaddr_to_le(u8 bdaddr_type
)
2975 switch (bdaddr_type
) {
2976 case BDADDR_LE_PUBLIC
:
2977 return ADDR_LE_DEV_PUBLIC
;
2980 /* Fallback to LE Random address type */
2981 return ADDR_LE_DEV_RANDOM
;