/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
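/* AUTO_OFF_TIMEOUT is the delay, in milliseconds, after which a controller
 * that still carries the HCI_AUTO_OFF flag is powered back down again by the
 * delayed power_off work scheduled from hci_power_on().
 */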
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
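/* hci_dev_list holds every registered controller and hci_cb_list the
 * upper-layer callbacks; each list is protected by the rwlock declared
 * next to it.
 */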
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
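/* Synchronous HCI requests: __hci_request() marks req_status as HCI_REQ_PEND
 * and sleeps on req_wait_q; hci_req_complete() and hci_req_cancel() store the
 * result, flip the status to HCI_REQ_DONE or HCI_REQ_CANCELED and wake the
 * waiter.
 */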
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
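/* bredr_init() and amp_init() queue the mandatory controller setup commands
 * for BR/EDR and AMP controllers respectively. They run from hci_init_req()
 * while HCI_INIT is set, so replies are matched against init_last_cmd by
 * hci_req_complete().
 */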
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
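/* The discovery cache keeps every inquiry result on &cache->all, entries
 * whose remote name is still unknown on &cache->unknown, and entries queued
 * for name resolution on &cache->resolve, the latter ordered by RSSI.
 */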
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
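/* hci_inquiry_cache_update() returns true when the remote name for the
 * reported device is already known (so no name resolution is needed) and
 * false otherwise.
 */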
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
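/* For the HCISETACLMTU and HCISETSCOMTU ioctls below, the 32 bit dev_opt
 * value carries the packet count in the low 16 bits and the MTU in the high
 * 16 bits.
 */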
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
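/* Decide whether a link key should be stored persistently: legacy keys and
 * keys from connections where both sides asked for some form of bonding (or
 * either side required dedicated bonding) are kept, while debug keys and
 * unauthenticated changed-combination keys are not.
 */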
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
				memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
					bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16],
		u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
*hdev
)
1416 struct oob_data
*data
, *n
;
1418 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1419 list_del(&data
->list
);
1426 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1429 struct oob_data
*data
;
1431 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1434 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1438 bacpy(&data
->bdaddr
, bdaddr
);
1439 list_add(&data
->list
, &hdev
->remote_oob_data
);
1442 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1443 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1445 BT_DBG("%s for %s", hdev
->name
, batostr(bdaddr
));
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
*hci_find_adv_entry(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1544 struct adv_entry
*entry
;
1546 list_for_each_entry(entry
, &hdev
->adv_entries
, list
)
1547 if (bacmp(bdaddr
, &entry
->bdaddr
) == 0)
1553 static inline int is_connectable_adv(u8 evt_type
)
1555 if (evt_type
== ADV_IND
|| evt_type
== ADV_DIRECT_IND
)
1561 int hci_add_adv_entry(struct hci_dev
*hdev
,
1562 struct hci_ev_le_advertising_info
*ev
)
1564 struct adv_entry
*entry
;
1566 if (!is_connectable_adv(ev
->evt_type
))
1569 /* Only new entries should be added to adv_entries. So, if
1570 * bdaddr was found, don't add it. */
1571 if (hci_find_adv_entry(hdev
, &ev
->bdaddr
))
1574 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
1578 bacpy(&entry
->bdaddr
, &ev
->bdaddr
);
1579 entry
->bdaddr_type
= ev
->bdaddr_type
;
1581 list_add(&entry
->list
, &hdev
->adv_entries
);
1583 BT_DBG("%s adv entry added: address %s type %u", hdev
->name
,
1584 batostr(&entry
->bdaddr
), entry
->bdaddr_type
);
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
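/* hci_le_scan() only records the scan parameters and queues le_scan_work on
 * system_long_wq; the work item issues the actual scan commands through
 * hci_do_le_scan(), which also arms le_scan_disable to stop the scan again
 * after the requested timeout.
 */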
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming frame */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
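/* Reassemble a packet that the driver delivers in arbitrary chunks: one
 * sk_buff per reassembly slot accumulates bytes until scb->expect reaches
 * zero (first the packet header, then the payload length announced in that
 * header), at which point the complete frame is handed to hci_recv_frame().
 */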
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
								int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
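/* hci_chan_sent() picks the channel whose queued skb has the highest
 * priority, preferring the connection with the fewest packets in flight, and
 * sets *quote from the controller's free buffer count for that link type.
 */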
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
								int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
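/* Command flow control: cmd_cnt holds the single outstanding-command credit.
 * It is consumed below when a command is handed to the driver and restored
 * once the controller acknowledges the command, or by hci_cmd_timer() when
 * the command times out; either path re-queues cmd_work.
 */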
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}