BlueZ - Bluetooth protocol stack for Linux

Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2011 ProFUSION Embedded Systems

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
        return atomic_notifier_chain_register(&hci_notifier, nb);

int hci_unregister_notifier(struct notifier_block *nb)
        return atomic_notifier_chain_unregister(&hci_notifier, nb);

static void hci_notify(struct hci_dev *hdev, int event)
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);

static void hci_req_cancel(struct hci_dev *hdev, int err)
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
        DECLARE_WAITQUEUE(wait, current);

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))

        switch (hdev->req_status) {
                err = -bt_to_errno(hdev->req_result);

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

static inline int hci_request(struct hci_dev *hdev,
                              void (*req)(struct hci_dev *hdev, unsigned long opt),
                              unsigned long opt, __u32 timeout)
        if (!test_bit(HCI_UP, &hdev->flags))

        /* Serialize all requests */
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s %ld", hdev->name, opt);

        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
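/* Basic BR/EDR controller bring-up: select packet-based flow control, issue
 * an optional HCI reset and queue the mandatory/optional information reads
 * and settings below. */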
static void bredr_init(struct hci_dev *hdev)
        struct hci_cp_delete_stored_link_key cp;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
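/* AMP controllers use block-based flow control and only need a reset plus a
 * local version read at init time. */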
static void amp_init(struct hci_dev *hdev)
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);

        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
                BT_ERR("Unknown device type %d", hdev->dev_type);
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s %x", hdev->name, auth);

        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
        BT_DBG("%s %x", hdev->name, encrypt);

        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
        struct hci_dev *hdev = NULL, *d;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);

        read_unlock(&hci_dev_list_lock);

/* ---- Inquiry support ---- */
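/* Report whether device discovery (inquiry, LE scan or name resolving) is
 * currently in progress on this controller. */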
bool hci_discovery_active(struct hci_dev *hdev)
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_INQUIRY:
        case DISCOVERY_LE_SCAN:
        case DISCOVERY_RESOLVING:

void hci_discovery_set_state(struct hci_dev *hdev, int state)
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)

        case DISCOVERY_STOPPED:
                mgmt_discovering(hdev, 0);

        case DISCOVERY_STARTING:

        case DISCOVERY_INQUIRY:
        case DISCOVERY_LE_SCAN:
                mgmt_discovering(hdev, 1);

        case DISCOVERY_RESOLVING:

        case DISCOVERY_STOPPING:

        hdev->discovery.state = state;
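/* Drop every entry from the inquiry cache and reset the discovery state. */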
static void inquiry_cache_flush(struct hci_dev *hdev)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
        cache->state = DISCOVERY_STOPPED;
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                if (!bacmp(&e->data.bdaddr, bdaddr))
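/* Keep the resolve list ordered by RSSI so that remote names get resolved
 * for the strongest (closest) devices first. */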
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))

        list_add(&ie->list, pos);
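/* Add or refresh an inquiry cache entry for a freshly discovered device,
 * keeping the unknown/resolve name-state bookkeeping up to date. */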
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);

        list_add(&ie->all, &cache->all);

                ie->name_state = NAME_KNOWN;
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);

        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

        BT_DBG("cache %p, copied %d", cache, copied);
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))

        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
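/* Handle the inquiry ioctl: optionally flush the cache, run a fresh inquiry
 * and copy the cached results back to user space. */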
int hci_inquiry(void __user *arg)
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;

        if (copy_from_user(&ir, ptr, sizeof(ir)))

        hdev = hci_dev_get(ir.dev_id);

        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) ||
            ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);

        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);

        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        BT_DBG("%s %p", hdev->name, hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {

        if (test_bit(HCI_UP, &hdev->flags)) {

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                            msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);

        set_bit(HCI_UP, &hdev->flags);
        hci_notify(hdev, HCI_DEV_UP);
        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                mgmt_powered(hdev, 1);
                hci_dev_unlock(hdev);

        /* Init failed, cleanup */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->cmd_work);
        flush_work(&hdev->rx_work);

        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->rx_q);

        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;

        hci_req_unlock(hdev);
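/* Common teardown path used by hci_dev_close(), rfkill and unregister:
 * flush pending work, drop queued packets and power the controller down. */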
static int hci_dev_do_close(struct hci_dev *hdev)
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        hci_req_cancel(hdev, ENODEV);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                              msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);

        flush_work(&hdev->cmd_work);

        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */

        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        hci_req_unlock(hdev);
int hci_dev_close(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);
        err = hci_dev_do_close(hdev);

int hci_dev_reset(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        if (!test_bit(HCI_UP, &hdev->flags))

        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

        hci_req_unlock(hdev);

int hci_dev_reset_stat(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
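/* Dispatch per-device HCI ioctls to the matching request helpers (auth,
 * encrypt, scan, link policy) or update the simple hdev settings directly. */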
int hci_dev_cmd(unsigned int cmd, void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_req dr;

        if (copy_from_user(&dr, arg, sizeof(dr)))

        hdev = hci_dev_get(dr.dev_id);

                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (!lmp_encrypt_capable(hdev)) {

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          msecs_to_jiffies(HCI_INIT_TIMEOUT));

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));

                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));

                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));

                hdev->link_mode = ((__u16) dr.dev_opt) &
                                  (HCI_LM_MASTER | HCI_LM_ACCEPT);

                hdev->pkt_type = (__u16) dr.dev_opt;

                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
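/* Copy the id and current flags of every registered controller out to a
 * user supplied buffer. */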
int hci_get_dev_list(void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;

        if (get_user(dev_num, (__u16 __user *) arg))

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

        read_unlock(&hci_dev_list_lock);

        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);

        return err ? -EFAULT : 0;
int hci_get_dev_info(void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_info di;

        if (copy_from_user(&di, arg, sizeof(di)))

        hdev = hci_dev_get(di.dev_id);

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr      = hdev->bdaddr;
        di.type        = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags       = hdev->flags;
        di.pkt_type    = hdev->pkt_type;
        di.acl_mtu     = hdev->acl_mtu;
        di.acl_pkts    = hdev->acl_pkts;
        di.sco_mtu     = hdev->sco_mtu;
        di.sco_pkts    = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        hci_dev_do_close(hdev);

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);

EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);

static void hci_power_off(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        hci_dev_close(hdev->id);

static void hci_discov_off(struct work_struct *work)
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
int hci_uuids_clear(struct hci_dev *hdev)
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

int hci_link_keys_clear(struct hci_dev *hdev)
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

int hci_smp_ltks_clear(struct hci_dev *hdev)
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
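/* Decide whether a newly created link key should be stored permanently,
 * based on the key type and on what both sides asked for during pairing. */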
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                              u8 key_type, u8 old_key_type)
        if (key_type < 0x03)

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)

        /* Security mode 3 case */

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)

        /* If none of the above criteria match, then don't store the key
         * persistently */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))

EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)

EXPORT_SYMBOL(hci_find_ltk_by_addr);
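/* Store (or update) a BR/EDR link key for a remote device and notify the
 * management interface about it. */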
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
        struct link_key *key, *old_key;
        u8 old_key_type, persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
                old_key_type = old_key->type;

                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                list_add(&key->list, &hdev->link_keys);

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) &&
            old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                        conn->key_type = type;

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        list_del(&key->list);
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16],
                u8 enc_size, u16 ediv, u8 rand[8])
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);

        key = kzalloc(sizeof(*key), GFP_ATOMIC);

        list_add(&key->list, &hdev->long_term_keys);

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;

        key->enc_size = enc_size;

        memcpy(key->rand, rand, sizeof(key->rand));

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);

int hci_remote_oob_data_clear(struct hci_dev *hdev)
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

                data = kmalloc(sizeof(*data), GFP_ATOMIC);

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)

int hci_blacklist_clear(struct hci_dev *hdev)
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)

        if (hci_blacklist_lookup(hdev, bdaddr))

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);

        list_del(&entry->list);

        return mgmt_device_unblocked(hdev, bdaddr, type);
static void hci_clear_adv_cache(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);

int hci_adv_entries_clear(struct hci_dev *hdev)
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);

        BT_DBG("%s adv cache cleared", hdev->name);

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
static inline int is_connectable_adv(u8 evt_type)
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)

int hci_add_adv_entry(struct hci_dev *hdev,
                      struct hci_ev_le_advertising_info *ev)
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
               batostr(&entry->bdaddr), entry->bdaddr_type);
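/* Request helpers used by the LE scan state machine below: one programs the
 * scan parameters, the other enables or disables scanning. */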
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.interval = interval;
        param.window = window;

        err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,

        err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        schedule_delayed_work(&hdev->le_scan_disable,
                              msecs_to_jiffies(timeout));
static void le_scan_disable_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

static void le_scan_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval,
                       param->window, param->timeout);

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
        struct list_head *head = &hci_dev_list, *p;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        if (!hdev->open || !hdev->close)

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)

        sprintf(hdev->name, "hci%d", id);

        list_add_tail(&hdev->list, head);

        mutex_init(&hdev->lock);

        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        discovery_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |

        if (!hdev->workqueue) {

        error = hci_add_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);

        if (rfkill_register(hdev->rfkill) < 0) {
                rfkill_destroy(hdev->rfkill);
                hdev->rfkill = NULL;

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        destroy_workqueue(hdev->workqueue);

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        rfkill_unregister(hdev->rfkill);
        rfkill_destroy(hdev->rfkill);

        hci_del_sysfs(hdev);

        cancel_delayed_work_sync(&hdev->adv_work);

        destroy_workqueue(hdev->workqueue);

        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_entries_clear(hdev);
        hci_dev_unlock(hdev);

EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
        hci_notify(hdev, HCI_DEV_SUSPEND);

EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
        hci_notify(hdev, HCI_DEV_RESUME);

EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                      && !test_bit(HCI_INIT, &hdev->flags))) {

        bt_cb(skb)->incoming = 1;

        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

EXPORT_SYMBOL(hci_recv_frame);
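/* Reassemble a (possibly fragmented) HCI packet of the given type into the
 * per-device reassembly buffer and hand complete frames to hci_recv_frame(). */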
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)

        skb = hdev->reassembly[index];

                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;

                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;

                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;

                skb = bt_skb_alloc(len, GFP_ATOMIC);

                scb = (void *) skb->cb;

                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;

        scb = (void *) skb->cb;
        len = min(scb->expect, (__u16)count);

        memcpy(skb_put(skb, len), data, len);

                if (skb->len == HCI_EVENT_HDR_SIZE) {
                        struct hci_event_hdr *h = hci_event_hdr(skb);
                        scb->expect = h->plen;

                        if (skb_tailroom(skb) < scb->expect) {
                                hdev->reassembly[index] = NULL;

        case HCI_ACLDATA_PKT:
                if (skb->len == HCI_ACL_HDR_SIZE) {
                        struct hci_acl_hdr *h = hci_acl_hdr(skb);
                        scb->expect = __le16_to_cpu(h->dlen);

                        if (skb_tailroom(skb) < scb->expect) {
                                hdev->reassembly[index] = NULL;

        case HCI_SCODATA_PKT:
                if (skb->len == HCI_SCO_HDR_SIZE) {
                        struct hci_sco_hdr *h = hci_sco_hdr(skb);
                        scb->expect = h->dlen;

                        if (skb_tailroom(skb) < scb->expect) {
                                hdev->reassembly[index] = NULL;

        if (scb->expect == 0) {
                /* Complete frame */

                bt_cb(skb)->pkt_type = type;
                hci_recv_frame(skb);

                hdev->reassembly[index] = NULL;
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)

        rem = hci_reassembly(hdev, type, data, count, type - 1);

        data += (count - rem);

EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
        struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

        struct { char type; } *pkt;

        /* Start of the frame */

        type = bt_cb(skb)->pkt_type;

        rem = hci_reassembly(hdev, type, data, count,

        data += (count - rem);

EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);

        /* Get rid of skb owner, prior to sending to the driver. */

        return hdev->send(skb);
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
                BT_ERR("%s no memory for command", hdev->name);

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);

        memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
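/* Prepend the ACL header (connection handle, packet boundary/broadcast
 * flags and data length) to an outgoing buffer. */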
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
        struct hci_acl_hdr *hdr;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        list = skb_shinfo(skb)->frag_list;
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);

                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);

                spin_unlock(&queue->lock);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        hci_queue_acl(conn, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);

EXPORT_SYMBOL(hci_send_acl);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);

EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)

                if (c->sent < min) {

        if (hci_conn_num(hdev, type) == num)

        switch (conn->type) {
                cnt = hdev->acl_cnt;

                cnt = hdev->sco_cnt;

                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;

                BT_ERR("Unknown link type");

        BT_DBG("conn %p quote %d", conn, *quote);
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
        struct hci_conn_hash *h = &hdev->conn_hash;

        BT_ERR("%s link tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %s",
                               hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
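/* Pick the channel that should transmit next for the given link type:
 * highest queued priority first, then the connection with the least
 * outstanding data, and compute its send quota. */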
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)

                        if (skb->priority > cur_prio) {

                                cur_prio = skb->priority;

                        if (conn->sent < min) {

        if (hci_conn_num(hdev, type) == conn_num)

        switch (chan->conn->type) {
                cnt = hdev->acl_cnt;

                cnt = hdev->sco_cnt;

                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;

                BT_ERR("Unknown link type");

        BT_DBG("chan %p quote %d", chan, *quote);
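/* After a scheduling round, bump the priority of still-queued packets so
 * lower-priority channels are not starved indefinitely. */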
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;

        BT_DBG("%s", hdev->name);

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&chan->data_q))

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,

                if (hci_conn_num(hdev, type) == num)
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
                        hci_link_tx_to(hdev, ACL_LINK);
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;

        __check_timeout(hdev, cnt);

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
static inline void hci_sched_acl(struct hci_dev *hdev)
        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ACL_LINK))

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
static inline void hci_sched_sco(struct hci_dev *hdev)
        struct hci_conn *conn;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        if (conn->sent == ~0)

static inline void hci_sched_esco(struct hci_dev *hdev)
        struct hci_conn *conn;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        if (conn->sent == ~0)
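/* Schedule LE traffic; controllers without a dedicated LE buffer pool share
 * the ACL credits, hence the le_pkts/le_cnt vs acl_cnt handling below. */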
static inline void hci_sched_le(struct hci_dev *hdev)
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                hdev->acl_cnt = cnt;

        hci_prio_recalculate(hdev, LE_LINK);
static void hci_tx_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);

                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);

                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
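/* RX work: drain the receive queue, mirror traffic to raw sockets when in
 * promiscuous mode and dispatch each packet to the event/ACL/SCO handlers. */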
static void hci_rx_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb, NULL);

                if (test_bit(HCI_RAW, &hdev->flags)) {

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:

                switch (bt_cb(skb)->pkt_type) {
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
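/* Command work: while the controller has command credits, take the next
 * queued command, remember it as sent_cmd and (re)arm the command timer. */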
static void hci_cmd_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));

                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);

int hci_cancel_inquiry(struct hci_dev *hdev)
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");