2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct
*work
);
37 static void hci_cmd_work(struct work_struct
*work
);
38 static void hci_tx_work(struct work_struct
*work
);
41 LIST_HEAD(hci_dev_list
);
42 DEFINE_RWLOCK(hci_dev_list_lock
);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list
);
46 DEFINE_RWLOCK(hci_cb_list_lock
);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida
);
51 /* ---- HCI notifications ---- */
/* Forward a device event (up/down/register/unregister) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev
->name
, cmd
, result
);
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
67 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
) {
68 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
69 u16 opcode
= __le16_to_cpu(sent
->opcode
);
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
79 if (cmd
!= HCI_OP_RESET
|| opcode
== HCI_OP_RESET
)
82 skb
= skb_clone(hdev
->sent_cmd
, GFP_ATOMIC
);
84 skb_queue_head(&hdev
->cmd_q
, skb
);
85 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
91 if (hdev
->req_status
== HCI_REQ_PEND
) {
92 hdev
->req_result
= result
;
93 hdev
->req_status
= HCI_REQ_DONE
;
94 wake_up_interruptible(&hdev
->req_wait_q
);
98 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
100 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
102 if (hdev
->req_status
== HCI_REQ_PEND
) {
103 hdev
->req_result
= err
;
104 hdev
->req_status
= HCI_REQ_CANCELED
;
105 wake_up_interruptible(&hdev
->req_wait_q
);
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev
*hdev
,
111 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
112 unsigned long opt
, __u32 timeout
)
114 DECLARE_WAITQUEUE(wait
, current
);
117 BT_DBG("%s start", hdev
->name
);
119 hdev
->req_status
= HCI_REQ_PEND
;
121 add_wait_queue(&hdev
->req_wait_q
, &wait
);
122 set_current_state(TASK_INTERRUPTIBLE
);
125 schedule_timeout(timeout
);
127 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
129 if (signal_pending(current
))
132 switch (hdev
->req_status
) {
134 err
= -bt_to_errno(hdev
->req_result
);
137 case HCI_REQ_CANCELED
:
138 err
= -hdev
->req_result
;
146 hdev
->req_status
= hdev
->req_result
= 0;
148 BT_DBG("%s end: err %d", hdev
->name
, err
);
153 static int hci_request(struct hci_dev
*hdev
,
154 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
155 unsigned long opt
, __u32 timeout
)
159 if (!test_bit(HCI_UP
, &hdev
->flags
))
162 /* Serialize all requests */
164 ret
= __hci_request(hdev
, req
, opt
, timeout
);
165 hci_req_unlock(hdev
);
170 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
172 BT_DBG("%s %ld", hdev
->name
, opt
);
175 set_bit(HCI_RESET
, &hdev
->flags
);
176 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
179 static void bredr_init(struct hci_dev
*hdev
)
181 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
183 /* Read Local Supported Features */
184 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
186 /* Read Local Version */
187 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
190 static void amp_init(struct hci_dev
*hdev
)
192 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
194 /* Read Local Version */
195 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
200 /* Read Data Blk size */
201 hci_send_cmd(hdev
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
204 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
208 BT_DBG("%s %ld", hdev
->name
, opt
);
210 /* Driver initialization */
212 /* Special commands */
213 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
214 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
215 skb
->dev
= (void *) hdev
;
217 skb_queue_tail(&hdev
->cmd_q
, skb
);
218 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
220 skb_queue_purge(&hdev
->driver_init
);
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
224 hci_reset_req(hdev
, 0);
226 switch (hdev
->dev_type
) {
236 BT_ERR("Unknown device type %d", hdev
->dev_type
);
241 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
245 BT_DBG("%s %x", hdev
->name
, scan
);
247 /* Inquiry and Page scans */
248 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
251 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
255 BT_DBG("%s %x", hdev
->name
, auth
);
258 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
261 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
265 BT_DBG("%s %x", hdev
->name
, encrypt
);
268 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
271 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
273 __le16 policy
= cpu_to_le16(opt
);
275 BT_DBG("%s %x", hdev
->name
, policy
);
277 /* Default link policy */
278 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
281 /* Get HCI device by index.
282 * Device is held on return. */
283 struct hci_dev
*hci_dev_get(int index
)
285 struct hci_dev
*hdev
= NULL
, *d
;
292 read_lock(&hci_dev_list_lock
);
293 list_for_each_entry(d
, &hci_dev_list
, list
) {
294 if (d
->id
== index
) {
295 hdev
= hci_dev_hold(d
);
299 read_unlock(&hci_dev_list_lock
);
303 /* ---- Inquiry support ---- */
305 bool hci_discovery_active(struct hci_dev
*hdev
)
307 struct discovery_state
*discov
= &hdev
->discovery
;
309 switch (discov
->state
) {
310 case DISCOVERY_FINDING
:
311 case DISCOVERY_RESOLVING
:
319 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
321 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
323 if (hdev
->discovery
.state
== state
)
327 case DISCOVERY_STOPPED
:
328 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
329 mgmt_discovering(hdev
, 0);
331 case DISCOVERY_STARTING
:
333 case DISCOVERY_FINDING
:
334 mgmt_discovering(hdev
, 1);
336 case DISCOVERY_RESOLVING
:
338 case DISCOVERY_STOPPING
:
342 hdev
->discovery
.state
= state
;
345 static void inquiry_cache_flush(struct hci_dev
*hdev
)
347 struct discovery_state
*cache
= &hdev
->discovery
;
348 struct inquiry_entry
*p
, *n
;
350 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
355 INIT_LIST_HEAD(&cache
->unknown
);
356 INIT_LIST_HEAD(&cache
->resolve
);
359 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
362 struct discovery_state
*cache
= &hdev
->discovery
;
363 struct inquiry_entry
*e
;
365 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
367 list_for_each_entry(e
, &cache
->all
, all
) {
368 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
375 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
378 struct discovery_state
*cache
= &hdev
->discovery
;
379 struct inquiry_entry
*e
;
381 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
383 list_for_each_entry(e
, &cache
->unknown
, list
) {
384 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
391 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
395 struct discovery_state
*cache
= &hdev
->discovery
;
396 struct inquiry_entry
*e
;
398 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
400 list_for_each_entry(e
, &cache
->resolve
, list
) {
401 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
403 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
410 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
411 struct inquiry_entry
*ie
)
413 struct discovery_state
*cache
= &hdev
->discovery
;
414 struct list_head
*pos
= &cache
->resolve
;
415 struct inquiry_entry
*p
;
419 list_for_each_entry(p
, &cache
->resolve
, list
) {
420 if (p
->name_state
!= NAME_PENDING
&&
421 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
426 list_add(&ie
->list
, pos
);
429 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
430 bool name_known
, bool *ssp
)
432 struct discovery_state
*cache
= &hdev
->discovery
;
433 struct inquiry_entry
*ie
;
435 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
438 *ssp
= data
->ssp_mode
;
440 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
442 if (ie
->data
.ssp_mode
&& ssp
)
445 if (ie
->name_state
== NAME_NEEDED
&&
446 data
->rssi
!= ie
->data
.rssi
) {
447 ie
->data
.rssi
= data
->rssi
;
448 hci_inquiry_cache_update_resolve(hdev
, ie
);
454 /* Entry not in the cache. Add new one. */
455 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
459 list_add(&ie
->all
, &cache
->all
);
462 ie
->name_state
= NAME_KNOWN
;
464 ie
->name_state
= NAME_NOT_KNOWN
;
465 list_add(&ie
->list
, &cache
->unknown
);
469 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
470 ie
->name_state
!= NAME_PENDING
) {
471 ie
->name_state
= NAME_KNOWN
;
475 memcpy(&ie
->data
, data
, sizeof(*data
));
476 ie
->timestamp
= jiffies
;
477 cache
->timestamp
= jiffies
;
479 if (ie
->name_state
== NAME_NOT_KNOWN
)
485 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
487 struct discovery_state
*cache
= &hdev
->discovery
;
488 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
489 struct inquiry_entry
*e
;
492 list_for_each_entry(e
, &cache
->all
, all
) {
493 struct inquiry_data
*data
= &e
->data
;
498 bacpy(&info
->bdaddr
, &data
->bdaddr
);
499 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
500 info
->pscan_period_mode
= data
->pscan_period_mode
;
501 info
->pscan_mode
= data
->pscan_mode
;
502 memcpy(info
->dev_class
, data
->dev_class
, 3);
503 info
->clock_offset
= data
->clock_offset
;
509 BT_DBG("cache %p, copied %d", cache
, copied
);
513 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
515 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
516 struct hci_cp_inquiry cp
;
518 BT_DBG("%s", hdev
->name
);
520 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
524 memcpy(&cp
.lap
, &ir
->lap
, 3);
525 cp
.length
= ir
->length
;
526 cp
.num_rsp
= ir
->num_rsp
;
527 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
530 int hci_inquiry(void __user
*arg
)
532 __u8 __user
*ptr
= arg
;
533 struct hci_inquiry_req ir
;
534 struct hci_dev
*hdev
;
535 int err
= 0, do_inquiry
= 0, max_rsp
;
539 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
542 hdev
= hci_dev_get(ir
.dev_id
);
547 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
548 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
549 inquiry_cache_flush(hdev
);
552 hci_dev_unlock(hdev
);
554 timeo
= ir
.length
* msecs_to_jiffies(2000);
557 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
562 /* for unlimited number of responses we will use buffer with
565 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
567 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
568 * copy it to the user space.
570 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
577 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
578 hci_dev_unlock(hdev
);
580 BT_DBG("num_rsp %d", ir
.num_rsp
);
582 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
584 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
597 /* ---- HCI ioctl helpers ---- */
599 int hci_dev_open(__u16 dev
)
601 struct hci_dev
*hdev
;
604 hdev
= hci_dev_get(dev
);
608 BT_DBG("%s %p", hdev
->name
, hdev
);
612 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
617 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
622 if (test_bit(HCI_UP
, &hdev
->flags
)) {
627 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
628 set_bit(HCI_RAW
, &hdev
->flags
);
630 /* Treat all non BR/EDR controllers as raw devices if
631 enable_hs is not set */
632 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
633 set_bit(HCI_RAW
, &hdev
->flags
);
635 if (hdev
->open(hdev
)) {
640 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
641 atomic_set(&hdev
->cmd_cnt
, 1);
642 set_bit(HCI_INIT
, &hdev
->flags
);
643 hdev
->init_last_cmd
= 0;
645 ret
= __hci_request(hdev
, hci_init_req
, 0, HCI_INIT_TIMEOUT
);
647 clear_bit(HCI_INIT
, &hdev
->flags
);
652 set_bit(HCI_UP
, &hdev
->flags
);
653 hci_notify(hdev
, HCI_DEV_UP
);
654 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
655 mgmt_valid_hdev(hdev
)) {
657 mgmt_powered(hdev
, 1);
658 hci_dev_unlock(hdev
);
661 /* Init failed, cleanup */
662 flush_work(&hdev
->tx_work
);
663 flush_work(&hdev
->cmd_work
);
664 flush_work(&hdev
->rx_work
);
666 skb_queue_purge(&hdev
->cmd_q
);
667 skb_queue_purge(&hdev
->rx_q
);
672 if (hdev
->sent_cmd
) {
673 kfree_skb(hdev
->sent_cmd
);
674 hdev
->sent_cmd
= NULL
;
682 hci_req_unlock(hdev
);
687 static int hci_dev_do_close(struct hci_dev
*hdev
)
689 BT_DBG("%s %p", hdev
->name
, hdev
);
691 cancel_work_sync(&hdev
->le_scan
);
693 cancel_delayed_work(&hdev
->power_off
);
695 hci_req_cancel(hdev
, ENODEV
);
698 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
699 del_timer_sync(&hdev
->cmd_timer
);
700 hci_req_unlock(hdev
);
704 /* Flush RX and TX works */
705 flush_work(&hdev
->tx_work
);
706 flush_work(&hdev
->rx_work
);
708 if (hdev
->discov_timeout
> 0) {
709 cancel_delayed_work(&hdev
->discov_off
);
710 hdev
->discov_timeout
= 0;
711 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
714 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
715 cancel_delayed_work(&hdev
->service_cache
);
717 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
720 inquiry_cache_flush(hdev
);
721 hci_conn_hash_flush(hdev
);
722 hci_dev_unlock(hdev
);
724 hci_notify(hdev
, HCI_DEV_DOWN
);
730 skb_queue_purge(&hdev
->cmd_q
);
731 atomic_set(&hdev
->cmd_cnt
, 1);
732 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
733 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
734 set_bit(HCI_INIT
, &hdev
->flags
);
735 __hci_request(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
736 clear_bit(HCI_INIT
, &hdev
->flags
);
740 flush_work(&hdev
->cmd_work
);
743 skb_queue_purge(&hdev
->rx_q
);
744 skb_queue_purge(&hdev
->cmd_q
);
745 skb_queue_purge(&hdev
->raw_q
);
747 /* Drop last sent command */
748 if (hdev
->sent_cmd
) {
749 del_timer_sync(&hdev
->cmd_timer
);
750 kfree_skb(hdev
->sent_cmd
);
751 hdev
->sent_cmd
= NULL
;
754 /* After this point our queues are empty
755 * and no tasks are scheduled. */
758 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
759 mgmt_valid_hdev(hdev
)) {
761 mgmt_powered(hdev
, 0);
762 hci_dev_unlock(hdev
);
768 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
769 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
771 hci_req_unlock(hdev
);
777 int hci_dev_close(__u16 dev
)
779 struct hci_dev
*hdev
;
782 hdev
= hci_dev_get(dev
);
786 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
787 cancel_delayed_work(&hdev
->power_off
);
789 err
= hci_dev_do_close(hdev
);
795 int hci_dev_reset(__u16 dev
)
797 struct hci_dev
*hdev
;
800 hdev
= hci_dev_get(dev
);
806 if (!test_bit(HCI_UP
, &hdev
->flags
))
810 skb_queue_purge(&hdev
->rx_q
);
811 skb_queue_purge(&hdev
->cmd_q
);
814 inquiry_cache_flush(hdev
);
815 hci_conn_hash_flush(hdev
);
816 hci_dev_unlock(hdev
);
821 atomic_set(&hdev
->cmd_cnt
, 1);
822 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
824 if (!test_bit(HCI_RAW
, &hdev
->flags
))
825 ret
= __hci_request(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
828 hci_req_unlock(hdev
);
833 int hci_dev_reset_stat(__u16 dev
)
835 struct hci_dev
*hdev
;
838 hdev
= hci_dev_get(dev
);
842 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
849 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
851 struct hci_dev
*hdev
;
852 struct hci_dev_req dr
;
855 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
858 hdev
= hci_dev_get(dr
.dev_id
);
864 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
869 if (!lmp_encrypt_capable(hdev
)) {
874 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
875 /* Auth must be enabled first */
876 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
882 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
887 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
892 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
897 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
898 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
902 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
906 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
907 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
911 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
912 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
924 int hci_get_dev_list(void __user
*arg
)
926 struct hci_dev
*hdev
;
927 struct hci_dev_list_req
*dl
;
928 struct hci_dev_req
*dr
;
929 int n
= 0, size
, err
;
932 if (get_user(dev_num
, (__u16 __user
*) arg
))
935 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
938 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
940 dl
= kzalloc(size
, GFP_KERNEL
);
946 read_lock(&hci_dev_list_lock
);
947 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
948 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
949 cancel_delayed_work(&hdev
->power_off
);
951 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
952 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
954 (dr
+ n
)->dev_id
= hdev
->id
;
955 (dr
+ n
)->dev_opt
= hdev
->flags
;
960 read_unlock(&hci_dev_list_lock
);
963 size
= sizeof(*dl
) + n
* sizeof(*dr
);
965 err
= copy_to_user(arg
, dl
, size
);
968 return err
? -EFAULT
: 0;
971 int hci_get_dev_info(void __user
*arg
)
973 struct hci_dev
*hdev
;
974 struct hci_dev_info di
;
977 if (copy_from_user(&di
, arg
, sizeof(di
)))
980 hdev
= hci_dev_get(di
.dev_id
);
984 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
985 cancel_delayed_work_sync(&hdev
->power_off
);
987 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
988 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
990 strcpy(di
.name
, hdev
->name
);
991 di
.bdaddr
= hdev
->bdaddr
;
992 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
993 di
.flags
= hdev
->flags
;
994 di
.pkt_type
= hdev
->pkt_type
;
995 if (lmp_bredr_capable(hdev
)) {
996 di
.acl_mtu
= hdev
->acl_mtu
;
997 di
.acl_pkts
= hdev
->acl_pkts
;
998 di
.sco_mtu
= hdev
->sco_mtu
;
999 di
.sco_pkts
= hdev
->sco_pkts
;
1001 di
.acl_mtu
= hdev
->le_mtu
;
1002 di
.acl_pkts
= hdev
->le_pkts
;
1006 di
.link_policy
= hdev
->link_policy
;
1007 di
.link_mode
= hdev
->link_mode
;
1009 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1010 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1012 if (copy_to_user(arg
, &di
, sizeof(di
)))
1020 /* ---- Interface to HCI drivers ---- */
1022 static int hci_rfkill_set_block(void *data
, bool blocked
)
1024 struct hci_dev
*hdev
= data
;
1026 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1031 hci_dev_do_close(hdev
);
1036 static const struct rfkill_ops hci_rfkill_ops
= {
1037 .set_block
= hci_rfkill_set_block
,
1040 static void hci_power_on(struct work_struct
*work
)
1042 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1044 BT_DBG("%s", hdev
->name
);
1046 if (hci_dev_open(hdev
->id
) < 0)
1049 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1050 schedule_delayed_work(&hdev
->power_off
, HCI_AUTO_OFF_TIMEOUT
);
1052 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1053 mgmt_index_added(hdev
);
1056 static void hci_power_off(struct work_struct
*work
)
1058 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1061 BT_DBG("%s", hdev
->name
);
1063 hci_dev_do_close(hdev
);
1066 static void hci_discov_off(struct work_struct
*work
)
1068 struct hci_dev
*hdev
;
1069 u8 scan
= SCAN_PAGE
;
1071 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1073 BT_DBG("%s", hdev
->name
);
1077 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1079 hdev
->discov_timeout
= 0;
1081 hci_dev_unlock(hdev
);
1084 int hci_uuids_clear(struct hci_dev
*hdev
)
1086 struct list_head
*p
, *n
;
1088 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1089 struct bt_uuid
*uuid
;
1091 uuid
= list_entry(p
, struct bt_uuid
, list
);
1100 int hci_link_keys_clear(struct hci_dev
*hdev
)
1102 struct list_head
*p
, *n
;
1104 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1105 struct link_key
*key
;
1107 key
= list_entry(p
, struct link_key
, list
);
1116 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1118 struct smp_ltk
*k
, *tmp
;
1120 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1128 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1132 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1133 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1139 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1140 u8 key_type
, u8 old_key_type
)
1143 if (key_type
< 0x03)
1146 /* Debug keys are insecure so don't store them persistently */
1147 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1150 /* Changed combination key and there's no previous one */
1151 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1154 /* Security mode 3 case */
1158 /* Neither local nor remote side had no-bonding as requirement */
1159 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1162 /* Local side had dedicated bonding as requirement */
1163 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1166 /* Remote side had dedicated bonding as requirement */
1167 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1170 /* If none of the above criteria match, then don't store the key
1175 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1179 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1180 if (k
->ediv
!= ediv
||
1181 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1190 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1195 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1196 if (addr_type
== k
->bdaddr_type
&&
1197 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1203 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1204 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1206 struct link_key
*key
, *old_key
;
1210 old_key
= hci_find_link_key(hdev
, bdaddr
);
1212 old_key_type
= old_key
->type
;
1215 old_key_type
= conn
? conn
->key_type
: 0xff;
1216 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1219 list_add(&key
->list
, &hdev
->link_keys
);
1222 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1224 /* Some buggy controller combinations generate a changed
1225 * combination key for legacy pairing even when there's no
1227 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1228 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1229 type
= HCI_LK_COMBINATION
;
1231 conn
->key_type
= type
;
1234 bacpy(&key
->bdaddr
, bdaddr
);
1235 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1236 key
->pin_len
= pin_len
;
1238 if (type
== HCI_LK_CHANGED_COMBINATION
)
1239 key
->type
= old_key_type
;
1246 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1248 mgmt_new_link_key(hdev
, key
, persistent
);
1251 conn
->flush_key
= !persistent
;
1256 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1257 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1260 struct smp_ltk
*key
, *old_key
;
1262 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1265 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1269 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1272 list_add(&key
->list
, &hdev
->long_term_keys
);
1275 bacpy(&key
->bdaddr
, bdaddr
);
1276 key
->bdaddr_type
= addr_type
;
1277 memcpy(key
->val
, tk
, sizeof(key
->val
));
1278 key
->authenticated
= authenticated
;
1280 key
->enc_size
= enc_size
;
1282 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1287 if (type
& HCI_SMP_LTK
)
1288 mgmt_new_ltk(hdev
, key
, 1);
1293 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1295 struct link_key
*key
;
1297 key
= hci_find_link_key(hdev
, bdaddr
);
1301 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1303 list_del(&key
->list
);
1309 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1311 struct smp_ltk
*k
, *tmp
;
1313 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1314 if (bacmp(bdaddr
, &k
->bdaddr
))
1317 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1326 /* HCI command timer function */
1327 static void hci_cmd_timeout(unsigned long arg
)
1329 struct hci_dev
*hdev
= (void *) arg
;
1331 if (hdev
->sent_cmd
) {
1332 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
1333 u16 opcode
= __le16_to_cpu(sent
->opcode
);
1335 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
1337 BT_ERR("%s command tx timeout", hdev
->name
);
1340 atomic_set(&hdev
->cmd_cnt
, 1);
1341 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1344 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1347 struct oob_data
*data
;
1349 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1350 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1356 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1358 struct oob_data
*data
;
1360 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1364 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1366 list_del(&data
->list
);
1372 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1374 struct oob_data
*data
, *n
;
1376 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1377 list_del(&data
->list
);
1384 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1387 struct oob_data
*data
;
1389 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1392 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1396 bacpy(&data
->bdaddr
, bdaddr
);
1397 list_add(&data
->list
, &hdev
->remote_oob_data
);
1400 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1401 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1403 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1408 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1410 struct bdaddr_list
*b
;
1412 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1413 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1419 int hci_blacklist_clear(struct hci_dev
*hdev
)
1421 struct list_head
*p
, *n
;
1423 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1424 struct bdaddr_list
*b
;
1426 b
= list_entry(p
, struct bdaddr_list
, list
);
1435 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1437 struct bdaddr_list
*entry
;
1439 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1442 if (hci_blacklist_lookup(hdev
, bdaddr
))
1445 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1449 bacpy(&entry
->bdaddr
, bdaddr
);
1451 list_add(&entry
->list
, &hdev
->blacklist
);
1453 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1456 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1458 struct bdaddr_list
*entry
;
1460 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1461 return hci_blacklist_clear(hdev
);
1463 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1467 list_del(&entry
->list
);
1470 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1473 static void le_scan_param_req(struct hci_dev
*hdev
, unsigned long opt
)
1475 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1476 struct hci_cp_le_set_scan_param cp
;
1478 memset(&cp
, 0, sizeof(cp
));
1479 cp
.type
= param
->type
;
1480 cp
.interval
= cpu_to_le16(param
->interval
);
1481 cp
.window
= cpu_to_le16(param
->window
);
1483 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
1486 static void le_scan_enable_req(struct hci_dev
*hdev
, unsigned long opt
)
1488 struct hci_cp_le_set_scan_enable cp
;
1490 memset(&cp
, 0, sizeof(cp
));
1494 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1497 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1498 u16 window
, int timeout
)
1500 long timeo
= msecs_to_jiffies(3000);
1501 struct le_scan_params param
;
1504 BT_DBG("%s", hdev
->name
);
1506 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1507 return -EINPROGRESS
;
1510 param
.interval
= interval
;
1511 param
.window
= window
;
1515 err
= __hci_request(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1518 err
= __hci_request(hdev
, le_scan_enable_req
, 0, timeo
);
1520 hci_req_unlock(hdev
);
1525 schedule_delayed_work(&hdev
->le_scan_disable
,
1526 msecs_to_jiffies(timeout
));
1531 int hci_cancel_le_scan(struct hci_dev
*hdev
)
1533 BT_DBG("%s", hdev
->name
);
1535 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1538 if (cancel_delayed_work(&hdev
->le_scan_disable
)) {
1539 struct hci_cp_le_set_scan_enable cp
;
1541 /* Send HCI command to disable LE Scan */
1542 memset(&cp
, 0, sizeof(cp
));
1543 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1549 static void le_scan_disable_work(struct work_struct
*work
)
1551 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1552 le_scan_disable
.work
);
1553 struct hci_cp_le_set_scan_enable cp
;
1555 BT_DBG("%s", hdev
->name
);
1557 memset(&cp
, 0, sizeof(cp
));
1559 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1562 static void le_scan_work(struct work_struct
*work
)
1564 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1565 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1567 BT_DBG("%s", hdev
->name
);
1569 hci_do_le_scan(hdev
, param
->type
, param
->interval
, param
->window
,
1573 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1576 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1578 BT_DBG("%s", hdev
->name
);
1580 if (work_busy(&hdev
->le_scan
))
1581 return -EINPROGRESS
;
1584 param
->interval
= interval
;
1585 param
->window
= window
;
1586 param
->timeout
= timeout
;
1588 queue_work(system_long_wq
, &hdev
->le_scan
);
1593 /* Alloc HCI device */
1594 struct hci_dev
*hci_alloc_dev(void)
1596 struct hci_dev
*hdev
;
1598 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1602 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1603 hdev
->esco_type
= (ESCO_HV1
);
1604 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1605 hdev
->io_capability
= 0x03; /* No Input No Output */
1607 hdev
->sniff_max_interval
= 800;
1608 hdev
->sniff_min_interval
= 80;
1610 mutex_init(&hdev
->lock
);
1611 mutex_init(&hdev
->req_lock
);
1613 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1614 INIT_LIST_HEAD(&hdev
->blacklist
);
1615 INIT_LIST_HEAD(&hdev
->uuids
);
1616 INIT_LIST_HEAD(&hdev
->link_keys
);
1617 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1618 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1619 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
1621 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1622 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1623 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1624 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1625 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
1627 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1628 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1629 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
1631 skb_queue_head_init(&hdev
->driver_init
);
1632 skb_queue_head_init(&hdev
->rx_q
);
1633 skb_queue_head_init(&hdev
->cmd_q
);
1634 skb_queue_head_init(&hdev
->raw_q
);
1636 init_waitqueue_head(&hdev
->req_wait_q
);
1638 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
1640 hci_init_sysfs(hdev
);
1641 discovery_init(hdev
);
1645 EXPORT_SYMBOL(hci_alloc_dev
);
1647 /* Free HCI device */
1648 void hci_free_dev(struct hci_dev
*hdev
)
1650 skb_queue_purge(&hdev
->driver_init
);
1652 /* will free via device release */
1653 put_device(&hdev
->dev
);
1655 EXPORT_SYMBOL(hci_free_dev
);
1657 /* Register HCI device */
1658 int hci_register_dev(struct hci_dev
*hdev
)
1662 if (!hdev
->open
|| !hdev
->close
)
1665 /* Do not allow HCI_AMP devices to register at index 0,
1666 * so the index can be used as the AMP controller ID.
1668 switch (hdev
->dev_type
) {
1670 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
1673 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
1682 sprintf(hdev
->name
, "hci%d", id
);
1685 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1687 write_lock(&hci_dev_list_lock
);
1688 list_add(&hdev
->list
, &hci_dev_list
);
1689 write_unlock(&hci_dev_list_lock
);
1691 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1693 if (!hdev
->workqueue
) {
1698 error
= hci_add_sysfs(hdev
);
1702 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1703 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
1706 if (rfkill_register(hdev
->rfkill
) < 0) {
1707 rfkill_destroy(hdev
->rfkill
);
1708 hdev
->rfkill
= NULL
;
1712 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
1714 if (hdev
->dev_type
!= HCI_AMP
)
1715 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1717 schedule_work(&hdev
->power_on
);
1719 hci_notify(hdev
, HCI_DEV_REG
);
1725 destroy_workqueue(hdev
->workqueue
);
1727 ida_simple_remove(&hci_index_ida
, hdev
->id
);
1728 write_lock(&hci_dev_list_lock
);
1729 list_del(&hdev
->list
);
1730 write_unlock(&hci_dev_list_lock
);
1734 EXPORT_SYMBOL(hci_register_dev
);
1736 /* Unregister HCI device */
1737 void hci_unregister_dev(struct hci_dev
*hdev
)
1741 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1743 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
1747 write_lock(&hci_dev_list_lock
);
1748 list_del(&hdev
->list
);
1749 write_unlock(&hci_dev_list_lock
);
1751 hci_dev_do_close(hdev
);
1753 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1754 kfree_skb(hdev
->reassembly
[i
]);
1756 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1757 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1759 mgmt_index_removed(hdev
);
1760 hci_dev_unlock(hdev
);
1763 /* mgmt_index_removed should take care of emptying the
1765 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1767 hci_notify(hdev
, HCI_DEV_UNREG
);
1770 rfkill_unregister(hdev
->rfkill
);
1771 rfkill_destroy(hdev
->rfkill
);
1774 hci_del_sysfs(hdev
);
1776 destroy_workqueue(hdev
->workqueue
);
1779 hci_blacklist_clear(hdev
);
1780 hci_uuids_clear(hdev
);
1781 hci_link_keys_clear(hdev
);
1782 hci_smp_ltks_clear(hdev
);
1783 hci_remote_oob_data_clear(hdev
);
1784 hci_dev_unlock(hdev
);
1788 ida_simple_remove(&hci_index_ida
, id
);
1790 EXPORT_SYMBOL(hci_unregister_dev
);
1792 /* Suspend HCI device */
1793 int hci_suspend_dev(struct hci_dev
*hdev
)
1795 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1798 EXPORT_SYMBOL(hci_suspend_dev
);
1800 /* Resume HCI device */
1801 int hci_resume_dev(struct hci_dev
*hdev
)
1803 hci_notify(hdev
, HCI_DEV_RESUME
);
1806 EXPORT_SYMBOL(hci_resume_dev
);
1808 /* Receive frame from HCI drivers */
1809 int hci_recv_frame(struct sk_buff
*skb
)
1811 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1812 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1813 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1819 bt_cb(skb
)->incoming
= 1;
1822 __net_timestamp(skb
);
1824 skb_queue_tail(&hdev
->rx_q
, skb
);
1825 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1829 EXPORT_SYMBOL(hci_recv_frame
);
1831 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1832 int count
, __u8 index
)
1837 struct sk_buff
*skb
;
1838 struct bt_skb_cb
*scb
;
1840 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1841 index
>= NUM_REASSEMBLY
)
1844 skb
= hdev
->reassembly
[index
];
1848 case HCI_ACLDATA_PKT
:
1849 len
= HCI_MAX_FRAME_SIZE
;
1850 hlen
= HCI_ACL_HDR_SIZE
;
1853 len
= HCI_MAX_EVENT_SIZE
;
1854 hlen
= HCI_EVENT_HDR_SIZE
;
1856 case HCI_SCODATA_PKT
:
1857 len
= HCI_MAX_SCO_SIZE
;
1858 hlen
= HCI_SCO_HDR_SIZE
;
1862 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1866 scb
= (void *) skb
->cb
;
1868 scb
->pkt_type
= type
;
1870 skb
->dev
= (void *) hdev
;
1871 hdev
->reassembly
[index
] = skb
;
1875 scb
= (void *) skb
->cb
;
1876 len
= min_t(uint
, scb
->expect
, count
);
1878 memcpy(skb_put(skb
, len
), data
, len
);
1887 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1888 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1889 scb
->expect
= h
->plen
;
1891 if (skb_tailroom(skb
) < scb
->expect
) {
1893 hdev
->reassembly
[index
] = NULL
;
1899 case HCI_ACLDATA_PKT
:
1900 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1901 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1902 scb
->expect
= __le16_to_cpu(h
->dlen
);
1904 if (skb_tailroom(skb
) < scb
->expect
) {
1906 hdev
->reassembly
[index
] = NULL
;
1912 case HCI_SCODATA_PKT
:
1913 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1914 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1915 scb
->expect
= h
->dlen
;
1917 if (skb_tailroom(skb
) < scb
->expect
) {
1919 hdev
->reassembly
[index
] = NULL
;
1926 if (scb
->expect
== 0) {
1927 /* Complete frame */
1929 bt_cb(skb
)->pkt_type
= type
;
1930 hci_recv_frame(skb
);
1932 hdev
->reassembly
[index
] = NULL
;
1940 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1944 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1948 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
1952 data
+= (count
- rem
);
1958 EXPORT_SYMBOL(hci_recv_fragment
);
1960 #define STREAM_REASSEMBLY 0
1962 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1968 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1971 struct { char type
; } *pkt
;
1973 /* Start of the frame */
1980 type
= bt_cb(skb
)->pkt_type
;
1982 rem
= hci_reassembly(hdev
, type
, data
, count
,
1987 data
+= (count
- rem
);
1993 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1995 /* ---- Interface to upper protocols ---- */
1997 int hci_register_cb(struct hci_cb
*cb
)
1999 BT_DBG("%p name %s", cb
, cb
->name
);
2001 write_lock(&hci_cb_list_lock
);
2002 list_add(&cb
->list
, &hci_cb_list
);
2003 write_unlock(&hci_cb_list_lock
);
2007 EXPORT_SYMBOL(hci_register_cb
);
2009 int hci_unregister_cb(struct hci_cb
*cb
)
2011 BT_DBG("%p name %s", cb
, cb
->name
);
2013 write_lock(&hci_cb_list_lock
);
2014 list_del(&cb
->list
);
2015 write_unlock(&hci_cb_list_lock
);
2019 EXPORT_SYMBOL(hci_unregister_cb
);
2021 static int hci_send_frame(struct sk_buff
*skb
)
2023 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2030 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2033 __net_timestamp(skb
);
2035 /* Send copy to monitor */
2036 hci_send_to_monitor(hdev
, skb
);
2038 if (atomic_read(&hdev
->promisc
)) {
2039 /* Send copy to the sockets */
2040 hci_send_to_sock(hdev
, skb
);
2043 /* Get rid of skb owner, prior to sending to the driver. */
2046 return hdev
->send(skb
);
2049 /* Send HCI command */
2050 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2052 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2053 struct hci_command_hdr
*hdr
;
2054 struct sk_buff
*skb
;
2056 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2058 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2060 BT_ERR("%s no memory for command", hdev
->name
);
2064 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2065 hdr
->opcode
= cpu_to_le16(opcode
);
2069 memcpy(skb_put(skb
, plen
), param
, plen
);
2071 BT_DBG("skb len %d", skb
->len
);
2073 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2074 skb
->dev
= (void *) hdev
;
2076 if (test_bit(HCI_INIT
, &hdev
->flags
))
2077 hdev
->init_last_cmd
= opcode
;
2079 skb_queue_tail(&hdev
->cmd_q
, skb
);
2080 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2085 /* Get data from the previously sent command */
2086 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2088 struct hci_command_hdr
*hdr
;
2090 if (!hdev
->sent_cmd
)
2093 hdr
= (void *) hdev
->sent_cmd
->data
;
2095 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2098 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
2100 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2104 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2106 struct hci_acl_hdr
*hdr
;
2109 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2110 skb_reset_transport_header(skb
);
2111 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2112 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2113 hdr
->dlen
= cpu_to_le16(len
);
2116 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
2117 struct sk_buff
*skb
, __u16 flags
)
2119 struct hci_conn
*conn
= chan
->conn
;
2120 struct hci_dev
*hdev
= conn
->hdev
;
2121 struct sk_buff
*list
;
2123 skb
->len
= skb_headlen(skb
);
2126 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2128 switch (hdev
->dev_type
) {
2130 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2133 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
2136 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
2140 list
= skb_shinfo(skb
)->frag_list
;
2142 /* Non fragmented */
2143 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2145 skb_queue_tail(queue
, skb
);
2148 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2150 skb_shinfo(skb
)->frag_list
= NULL
;
2152 /* Queue all fragments atomically */
2153 spin_lock(&queue
->lock
);
2155 __skb_queue_tail(queue
, skb
);
2157 flags
&= ~ACL_START
;
2160 skb
= list
; list
= list
->next
;
2162 skb
->dev
= (void *) hdev
;
2163 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2164 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2166 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2168 __skb_queue_tail(queue
, skb
);
2171 spin_unlock(&queue
->lock
);
2175 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2177 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2179 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2181 skb
->dev
= (void *) hdev
;
2183 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2185 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2189 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2191 struct hci_dev
*hdev
= conn
->hdev
;
2192 struct hci_sco_hdr hdr
;
2194 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2196 hdr
.handle
= cpu_to_le16(conn
->handle
);
2197 hdr
.dlen
= skb
->len
;
2199 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2200 skb_reset_transport_header(skb
);
2201 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2203 skb
->dev
= (void *) hdev
;
2204 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2206 skb_queue_tail(&conn
->data_q
, skb
);
2207 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2210 /* ---- HCI TX task (outgoing data) ---- */
2212 /* HCI Connection scheduler */
2213 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
2216 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2217 struct hci_conn
*conn
= NULL
, *c
;
2218 unsigned int num
= 0, min
= ~0;
2220 /* We don't have to lock device here. Connections are always
2221 * added and removed with TX task disabled. */
2225 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2226 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2229 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2234 if (c
->sent
< min
) {
2239 if (hci_conn_num(hdev
, type
) == num
)
2248 switch (conn
->type
) {
2250 cnt
= hdev
->acl_cnt
;
2254 cnt
= hdev
->sco_cnt
;
2257 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2261 BT_ERR("Unknown link type");
2269 BT_DBG("conn %p quote %d", conn
, *quote
);
2273 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2275 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2278 BT_ERR("%s link tx timeout", hdev
->name
);
2282 /* Kill stalled connections */
2283 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2284 if (c
->type
== type
&& c
->sent
) {
2285 BT_ERR("%s killing stalled connection %pMR",
2286 hdev
->name
, &c
->dst
);
2287 hci_acl_disconn(c
, HCI_ERROR_REMOTE_USER_TERM
);
2294 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2297 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2298 struct hci_chan
*chan
= NULL
;
2299 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
2300 struct hci_conn
*conn
;
2301 int cnt
, q
, conn_num
= 0;
2303 BT_DBG("%s", hdev
->name
);
2307 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2308 struct hci_chan
*tmp
;
2310 if (conn
->type
!= type
)
2313 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2318 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2319 struct sk_buff
*skb
;
2321 if (skb_queue_empty(&tmp
->data_q
))
2324 skb
= skb_peek(&tmp
->data_q
);
2325 if (skb
->priority
< cur_prio
)
2328 if (skb
->priority
> cur_prio
) {
2331 cur_prio
= skb
->priority
;
2336 if (conn
->sent
< min
) {
2342 if (hci_conn_num(hdev
, type
) == conn_num
)
2351 switch (chan
->conn
->type
) {
2353 cnt
= hdev
->acl_cnt
;
2356 cnt
= hdev
->block_cnt
;
2360 cnt
= hdev
->sco_cnt
;
2363 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2367 BT_ERR("Unknown link type");
2372 BT_DBG("chan %p quote %d", chan
, *quote
);
2376 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2378 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2379 struct hci_conn
*conn
;
2382 BT_DBG("%s", hdev
->name
);
2386 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2387 struct hci_chan
*chan
;
2389 if (conn
->type
!= type
)
2392 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2397 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2398 struct sk_buff
*skb
;
2405 if (skb_queue_empty(&chan
->data_q
))
2408 skb
= skb_peek(&chan
->data_q
);
2409 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2412 skb
->priority
= HCI_PRIO_MAX
- 1;
2414 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2418 if (hci_conn_num(hdev
, type
) == num
)
2426 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2428 /* Calculate count of blocks used by this packet */
2429 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2432 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2434 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2435 /* ACL tx timeout must be longer than maximum
2436 * link supervision timeout (40.9 seconds) */
2437 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2438 HCI_ACL_TX_TIMEOUT
))
2439 hci_link_tx_to(hdev
, ACL_LINK
);
2443 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2445 unsigned int cnt
= hdev
->acl_cnt
;
2446 struct hci_chan
*chan
;
2447 struct sk_buff
*skb
;
2450 __check_timeout(hdev
, cnt
);
2452 while (hdev
->acl_cnt
&&
2453 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2454 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2455 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2456 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2457 skb
->len
, skb
->priority
);
2459 /* Stop if priority has changed */
2460 if (skb
->priority
< priority
)
2463 skb
= skb_dequeue(&chan
->data_q
);
2465 hci_conn_enter_active_mode(chan
->conn
,
2466 bt_cb(skb
)->force_active
);
2468 hci_send_frame(skb
);
2469 hdev
->acl_last_tx
= jiffies
;
2477 if (cnt
!= hdev
->acl_cnt
)
2478 hci_prio_recalculate(hdev
, ACL_LINK
);
2481 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
2483 unsigned int cnt
= hdev
->block_cnt
;
2484 struct hci_chan
*chan
;
2485 struct sk_buff
*skb
;
2489 __check_timeout(hdev
, cnt
);
2491 BT_DBG("%s", hdev
->name
);
2493 if (hdev
->dev_type
== HCI_AMP
)
2498 while (hdev
->block_cnt
> 0 &&
2499 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
2500 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2501 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2504 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2505 skb
->len
, skb
->priority
);
2507 /* Stop if priority has changed */
2508 if (skb
->priority
< priority
)
2511 skb
= skb_dequeue(&chan
->data_q
);
2513 blocks
= __get_blocks(hdev
, skb
);
2514 if (blocks
> hdev
->block_cnt
)
2517 hci_conn_enter_active_mode(chan
->conn
,
2518 bt_cb(skb
)->force_active
);
2520 hci_send_frame(skb
);
2521 hdev
->acl_last_tx
= jiffies
;
2523 hdev
->block_cnt
-= blocks
;
2526 chan
->sent
+= blocks
;
2527 chan
->conn
->sent
+= blocks
;
2531 if (cnt
!= hdev
->block_cnt
)
2532 hci_prio_recalculate(hdev
, type
);
2535 static void hci_sched_acl(struct hci_dev
*hdev
)
2537 BT_DBG("%s", hdev
->name
);
2539 /* No ACL link over BR/EDR controller */
2540 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
2543 /* No AMP link over AMP controller */
2544 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
2547 switch (hdev
->flow_ctl_mode
) {
2548 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
2549 hci_sched_acl_pkt(hdev
);
2552 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
2553 hci_sched_acl_blk(hdev
);
2559 static void hci_sched_sco(struct hci_dev
*hdev
)
2561 struct hci_conn
*conn
;
2562 struct sk_buff
*skb
;
2565 BT_DBG("%s", hdev
->name
);
2567 if (!hci_conn_num(hdev
, SCO_LINK
))
2570 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2571 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2572 BT_DBG("skb %p len %d", skb
, skb
->len
);
2573 hci_send_frame(skb
);
2576 if (conn
->sent
== ~0)
2582 static void hci_sched_esco(struct hci_dev
*hdev
)
2584 struct hci_conn
*conn
;
2585 struct sk_buff
*skb
;
2588 BT_DBG("%s", hdev
->name
);
2590 if (!hci_conn_num(hdev
, ESCO_LINK
))
2593 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
2595 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2596 BT_DBG("skb %p len %d", skb
, skb
->len
);
2597 hci_send_frame(skb
);
2600 if (conn
->sent
== ~0)
2606 static void hci_sched_le(struct hci_dev
*hdev
)
2608 struct hci_chan
*chan
;
2609 struct sk_buff
*skb
;
2610 int quote
, cnt
, tmp
;
2612 BT_DBG("%s", hdev
->name
);
2614 if (!hci_conn_num(hdev
, LE_LINK
))
2617 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2618 /* LE tx timeout must be longer than maximum
2619 * link supervision timeout (40.9 seconds) */
2620 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2621 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2622 hci_link_tx_to(hdev
, LE_LINK
);
2625 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2627 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2628 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2629 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2630 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2631 skb
->len
, skb
->priority
);
2633 /* Stop if priority has changed */
2634 if (skb
->priority
< priority
)
2637 skb
= skb_dequeue(&chan
->data_q
);
2639 hci_send_frame(skb
);
2640 hdev
->le_last_tx
= jiffies
;
2651 hdev
->acl_cnt
= cnt
;
2654 hci_prio_recalculate(hdev
, LE_LINK
);
2657 static void hci_tx_work(struct work_struct
*work
)
2659 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2660 struct sk_buff
*skb
;
2662 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2663 hdev
->sco_cnt
, hdev
->le_cnt
);
2665 /* Schedule queues and send stuff to HCI driver */
2667 hci_sched_acl(hdev
);
2669 hci_sched_sco(hdev
);
2671 hci_sched_esco(hdev
);
2675 /* Send next queued raw (unknown type) packet */
2676 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2677 hci_send_frame(skb
);
2680 /* ----- HCI RX task (incoming data processing) ----- */
2682 /* ACL data packet */
2683 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2685 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2686 struct hci_conn
*conn
;
2687 __u16 handle
, flags
;
2689 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2691 handle
= __le16_to_cpu(hdr
->handle
);
2692 flags
= hci_flags(handle
);
2693 handle
= hci_handle(handle
);
2695 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
2698 hdev
->stat
.acl_rx
++;
2701 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2702 hci_dev_unlock(hdev
);
2705 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2708 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
2709 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
2710 mgmt_device_connected(hdev
, &conn
->dst
, conn
->type
,
2711 conn
->dst_type
, 0, NULL
, 0,
2713 hci_dev_unlock(hdev
);
2715 /* Send to upper protocol */
2716 l2cap_recv_acldata(conn
, skb
, flags
);
2719 BT_ERR("%s ACL packet for unknown connection handle %d",
2720 hdev
->name
, handle
);
2726 /* SCO data packet */
2727 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2729 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2730 struct hci_conn
*conn
;
2733 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2735 handle
= __le16_to_cpu(hdr
->handle
);
2737 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
2739 hdev
->stat
.sco_rx
++;
2742 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2743 hci_dev_unlock(hdev
);
2746 /* Send to upper protocol */
2747 sco_recv_scodata(conn
, skb
);
2750 BT_ERR("%s SCO packet for unknown connection handle %d",
2751 hdev
->name
, handle
);
2757 static void hci_rx_work(struct work_struct
*work
)
2759 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2760 struct sk_buff
*skb
;
2762 BT_DBG("%s", hdev
->name
);
2764 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2765 /* Send copy to monitor */
2766 hci_send_to_monitor(hdev
, skb
);
2768 if (atomic_read(&hdev
->promisc
)) {
2769 /* Send copy to the sockets */
2770 hci_send_to_sock(hdev
, skb
);
2773 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2778 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2779 /* Don't process data packets in this states. */
2780 switch (bt_cb(skb
)->pkt_type
) {
2781 case HCI_ACLDATA_PKT
:
2782 case HCI_SCODATA_PKT
:
2789 switch (bt_cb(skb
)->pkt_type
) {
2791 BT_DBG("%s Event packet", hdev
->name
);
2792 hci_event_packet(hdev
, skb
);
2795 case HCI_ACLDATA_PKT
:
2796 BT_DBG("%s ACL data packet", hdev
->name
);
2797 hci_acldata_packet(hdev
, skb
);
2800 case HCI_SCODATA_PKT
:
2801 BT_DBG("%s SCO data packet", hdev
->name
);
2802 hci_scodata_packet(hdev
, skb
);
2812 static void hci_cmd_work(struct work_struct
*work
)
2814 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2815 struct sk_buff
*skb
;
2817 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
2818 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
2820 /* Send queued commands */
2821 if (atomic_read(&hdev
->cmd_cnt
)) {
2822 skb
= skb_dequeue(&hdev
->cmd_q
);
2826 kfree_skb(hdev
->sent_cmd
);
2828 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2829 if (hdev
->sent_cmd
) {
2830 atomic_dec(&hdev
->cmd_cnt
);
2831 hci_send_frame(skb
);
2832 if (test_bit(HCI_RESET
, &hdev
->flags
))
2833 del_timer(&hdev
->cmd_timer
);
2835 mod_timer(&hdev
->cmd_timer
,
2836 jiffies
+ HCI_CMD_TIMEOUT
);
2838 skb_queue_head(&hdev
->cmd_q
, skb
);
2839 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2844 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2846 /* General inquiry access code (GIAC) */
2847 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2848 struct hci_cp_inquiry cp
;
2850 BT_DBG("%s", hdev
->name
);
2852 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2853 return -EINPROGRESS
;
2855 inquiry_cache_flush(hdev
);
2857 memset(&cp
, 0, sizeof(cp
));
2858 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2861 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2864 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2866 BT_DBG("%s", hdev
->name
);
2868 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2871 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
2874 u8
bdaddr_to_le(u8 bdaddr_type
)
2876 switch (bdaddr_type
) {
2877 case BDADDR_LE_PUBLIC
:
2878 return ADDR_LE_DEV_PUBLIC
;
2881 /* Fallback to LE Random address type */
2882 return ADDR_LE_DEV_RANDOM
;