/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

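/* Example (illustrative only, not part of this file): callers pair a short
 * request callback with hci_request(), which blocks until hci_req_complete()
 * wakes the waiter or the timeout expires. A scan-enable request could look
 * like this (the opt value and timeout here are placeholders):
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  HCI_INIT_TIMEOUT);
 *
 * See hci_dev_cmd() below for the real ioctl-driven callers.
 */
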
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}

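/* Usage note (illustrative only): every successful hci_dev_get() must be
 * balanced with hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */
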
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

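/* Ordering note (illustrative): the resolve list is kept sorted by ascending
 * abs(rssi), i.e. strongest signal first, so name resolution targets nearby
 * devices before distant ones. E.g. with entries at -40 and -70 dBm, a new
 * entry at -55 dBm is inserted between them.
 */
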
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir,
				  timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

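/* For reference: in the HCI authentication requirements encoding, 0x00/0x01
 * mean no bonding, 0x02/0x03 dedicated bonding and 0x04/0x05 general bonding
 * (odd values request MITM protection). So dedicated bonding on either side
 * forces a persistent key, while no-bonding on both sides means the key is
 * not kept once the link drops.
 */
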
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

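/* Example (illustrative only): blocking and unblocking a peer. The address
 * below is a placeholder and type is the address type used by mgmt:
 *
 *	bdaddr_t ba = {{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 }};
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &ba, 0);
 *	...
 *	err = hci_blacklist_del(hdev, &ba, 0);
 *	hci_dev_unlock(hdev);
 */
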
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

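/* Example (illustrative only): the usual driver-side registration flow,
 * with hypothetical my_open/my_close/my_send callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
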
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

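/* Example (illustrative only): how a transport driver hands a complete
 * frame to the core; buf and len are placeholders:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *
 *	hci_recv_frame(skb);
 */
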
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

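/* Reassembly walk-through (illustrative): scb->expect starts at the header
 * size, so the loop first accumulates the 2-byte event header (4-byte ACL,
 * 3-byte SCO), then reloads expect from the length field in that header and
 * keeps copying until expect reaches 0, at which point the completed skb is
 * handed to hci_recv_frame().
 */
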
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

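/* Example (illustrative only): an upper protocol registers for link-level
 * confirmations roughly like this (the callback set shown is a sketch; see
 * struct hci_cb in hci_core.h for the real hooks):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_proto_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */
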
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

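/* Example (illustrative only): queueing a parametrised command, here a
 * Write Scan Enable with both scans on (the value is just an example):
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is serialized via cmd_q and sent from hci_cmd_work() once the
 * controller has credit (hdev->cmd_cnt).
 */
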
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

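/* Scheduling note (illustrative): the quote is a fair share of the free
 * controller buffers across busy connections. With e.g. hdev->acl_cnt == 8
 * free ACL buffers and num == 3 ACL connections with queued data, the
 * least-used connection gets a quote of 8 / 3 = 2 packets this round
 * (minimum 1, so a connection can always make progress).
 */
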
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

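/* Channel selection (illustrative): only channels whose head-of-queue skb
 * carries the highest priority seen so far compete in a round; a higher
 * priority resets the candidate set, so high-priority traffic is served
 * before lower-priority channels get any quota. hci_prio_recalculate()
 * below prevents permanent starvation by promoting untouched channels.
 */
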
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

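/* Note (illustrative): the lap[] above is the GIAC 0x9E8B33 stored
 * little-endian ({ 0x33, 0x8b, 0x9e }), i.e. a general inquiry that any
 * discoverable device answers; length is the inquiry duration in units
 * of 1.28 s.
 */
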
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
;