/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*req)(struct hci_dev *hdev, unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);

	/* If the request didn't send any commands return immediately */
	if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
		hdev->req_status = 0;
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
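/* The resolve list is kept ordered by signal strength so that name
 * resolution is attempted for the strongest devices first; entries with
 * a name request already pending keep their position.
 */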
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
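/* Advertising data is a sequence of length-prefixed structures:
 * ad[0] = length of (type + payload), ad[1] = AD type (an EIR_*
 * constant), ad[2..] = payload. create_ad() above emits the flags,
 * TX power and local name this way, shortening the name so the whole
 * block fits in HCI_MAX_AD_LENGTH bytes.
 */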
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
)
905 struct hci_dev
*hdev
;
908 hdev
= hci_dev_get(dev
);
914 if (!test_bit(HCI_UP
, &hdev
->flags
))
918 skb_queue_purge(&hdev
->rx_q
);
919 skb_queue_purge(&hdev
->cmd_q
);
922 inquiry_cache_flush(hdev
);
923 hci_conn_hash_flush(hdev
);
924 hci_dev_unlock(hdev
);
929 atomic_set(&hdev
->cmd_cnt
, 1);
930 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
932 if (!test_bit(HCI_RAW
, &hdev
->flags
))
933 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
936 hci_req_unlock(hdev
);
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
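/* The comparisons above follow the HCI authentication requirements
 * encoding: 0x00/0x01 mean no bonding, 0x02/0x03 dedicated bonding and
 * 0x04/0x05 general bonding, with the MITM protection bit as the LSB.
 * Only the bonding intent matters for deciding persistence here.
 */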
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
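/*
 * Typical driver usage (illustrative sketch, not part of this file;
 * my_open/my_close/my_send are placeholder names for the callbacks a
 * transport driver supplies -- see existing drivers such as btusb):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */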
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
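/* One reassembly slot is kept per packet type (plus a dedicated slot
 * for the byte stream interface below). scb->expect counts the bytes
 * still missing, first for the header and then for the payload length
 * the header announces. A negative return means the slot was dropped;
 * otherwise the number of input bytes not yet consumed is returned.
 */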
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
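/* Only the first fragment keeps ACL_START in its handle flags; the
 * continuation fragments are re-marked ACL_CONT before they are queued,
 * and the whole sequence is appended under the queue lock so it stays
 * contiguous on the wire.
 */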
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
*conn
, struct sk_buff
*skb
)
2314 struct hci_dev
*hdev
= conn
->hdev
;
2315 struct hci_sco_hdr hdr
;
2317 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2319 hdr
.handle
= cpu_to_le16(conn
->handle
);
2320 hdr
.dlen
= skb
->len
;
2322 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2323 skb_reset_transport_header(skb
);
2324 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2326 skb
->dev
= (void *) hdev
;
2327 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2329 skb_queue_tail(&conn
->data_q
, skb
);
2330 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
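/* The quote is the controller's free buffer count divided evenly among
 * the active connections of this type (minimum one packet), so a busy
 * link cannot starve its peers; the connection with the fewest packets
 * in flight is served first.
 */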
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
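/* cmd_cnt acts as a single credit: it is consumed here when a command
 * goes to the driver and handed back by the event processing (or by the
 * command timer above on a stall), so at most one command is in flight
 * towards the controller at any time.
 */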
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}