/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
55 #define AUTO_OFF_TIMEOUT 2000
59 static void hci_rx_work(struct work_struct
*work
);
60 static void hci_cmd_work(struct work_struct
*work
);
61 static void hci_tx_work(struct work_struct
*work
);
64 LIST_HEAD(hci_dev_list
);
65 DEFINE_RWLOCK(hci_dev_list_lock
);
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list
);
69 DEFINE_RWLOCK(hci_cb_list_lock
);
71 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer so that
 * monitoring sockets (e.g. hcidump) see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
78 /* ---- HCI requests ---- */
80 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
82 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev
->name
, cmd
, result
);
84 /* If this is the init phase check if the completed command matches
85 * the last init command, and if not just return.
87 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
)
90 if (hdev
->req_status
== HCI_REQ_PEND
) {
91 hdev
->req_result
= result
;
92 hdev
->req_status
= HCI_REQ_DONE
;
93 wake_up_interruptible(&hdev
->req_wait_q
);
97 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
99 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
101 if (hdev
->req_status
== HCI_REQ_PEND
) {
102 hdev
->req_result
= err
;
103 hdev
->req_status
= HCI_REQ_CANCELED
;
104 wake_up_interruptible(&hdev
->req_wait_q
);
108 /* Execute request and wait for completion. */
109 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
110 unsigned long opt
, __u32 timeout
)
112 DECLARE_WAITQUEUE(wait
, current
);
115 BT_DBG("%s start", hdev
->name
);
117 hdev
->req_status
= HCI_REQ_PEND
;
119 add_wait_queue(&hdev
->req_wait_q
, &wait
);
120 set_current_state(TASK_INTERRUPTIBLE
);
123 schedule_timeout(timeout
);
125 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
127 if (signal_pending(current
))
130 switch (hdev
->req_status
) {
132 err
= -bt_to_errno(hdev
->req_result
);
135 case HCI_REQ_CANCELED
:
136 err
= -hdev
->req_result
;
144 hdev
->req_status
= hdev
->req_result
= 0;
146 BT_DBG("%s end: err %d", hdev
->name
, err
);
151 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
152 unsigned long opt
, __u32 timeout
)
156 if (!test_bit(HCI_UP
, &hdev
->flags
))
159 /* Serialize all requests */
161 ret
= __hci_request(hdev
, req
, opt
, timeout
);
162 hci_req_unlock(hdev
);
167 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
169 BT_DBG("%s %ld", hdev
->name
, opt
);
172 set_bit(HCI_RESET
, &hdev
->flags
);
173 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
176 static void bredr_init(struct hci_dev
*hdev
)
178 struct hci_cp_delete_stored_link_key cp
;
182 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
184 /* Mandatory initialization */
187 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
)) {
188 set_bit(HCI_RESET
, &hdev
->flags
);
189 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
192 /* Read Local Supported Features */
193 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
195 /* Read Local Version */
196 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
198 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
199 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
201 /* Read BD Address */
202 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
204 /* Read Class of Device */
205 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
207 /* Read Local Name */
208 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
210 /* Read Voice Setting */
211 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
213 /* Optional initialization */
215 /* Clear Event Filters */
216 flt_type
= HCI_FLT_CLEAR_ALL
;
217 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
219 /* Connection accept timeout ~20 secs */
220 param
= cpu_to_le16(0x7d00);
221 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
223 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
225 hci_send_cmd(hdev
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
228 static void amp_init(struct hci_dev
*hdev
)
230 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
233 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
235 /* Read Local Version */
236 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
239 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
243 BT_DBG("%s %ld", hdev
->name
, opt
);
245 /* Driver initialization */
247 /* Special commands */
248 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
249 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
250 skb
->dev
= (void *) hdev
;
252 skb_queue_tail(&hdev
->cmd_q
, skb
);
253 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
255 skb_queue_purge(&hdev
->driver_init
);
257 switch (hdev
->dev_type
) {
267 BT_ERR("Unknown device type %d", hdev
->dev_type
);
273 static void hci_le_init_req(struct hci_dev
*hdev
, unsigned long opt
)
275 BT_DBG("%s", hdev
->name
);
277 /* Read LE buffer size */
278 hci_send_cmd(hdev
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
281 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
285 BT_DBG("%s %x", hdev
->name
, scan
);
287 /* Inquiry and Page scans */
288 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
291 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
295 BT_DBG("%s %x", hdev
->name
, auth
);
298 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
301 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
305 BT_DBG("%s %x", hdev
->name
, encrypt
);
308 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
311 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
313 __le16 policy
= cpu_to_le16(opt
);
315 BT_DBG("%s %x", hdev
->name
, policy
);
317 /* Default link policy */
318 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
321 /* Get HCI device by index.
322 * Device is held on return. */
323 struct hci_dev
*hci_dev_get(int index
)
325 struct hci_dev
*hdev
= NULL
, *d
;
332 read_lock(&hci_dev_list_lock
);
333 list_for_each_entry(d
, &hci_dev_list
, list
) {
334 if (d
->id
== index
) {
335 hdev
= hci_dev_hold(d
);
339 read_unlock(&hci_dev_list_lock
);
343 /* ---- Inquiry support ---- */
345 bool hci_discovery_active(struct hci_dev
*hdev
)
347 struct discovery_state
*discov
= &hdev
->discovery
;
349 switch (discov
->state
) {
350 case DISCOVERY_FINDING
:
351 case DISCOVERY_RESOLVING
:
359 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
361 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
363 if (hdev
->discovery
.state
== state
)
367 case DISCOVERY_STOPPED
:
368 hdev
->discovery
.type
= 0;
370 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
371 mgmt_discovering(hdev
, 0);
373 case DISCOVERY_STARTING
:
375 case DISCOVERY_FINDING
:
376 mgmt_discovering(hdev
, 1);
378 case DISCOVERY_RESOLVING
:
380 case DISCOVERY_STOPPING
:
384 hdev
->discovery
.state
= state
;
387 static void inquiry_cache_flush(struct hci_dev
*hdev
)
389 struct discovery_state
*cache
= &hdev
->discovery
;
390 struct inquiry_entry
*p
, *n
;
392 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
397 INIT_LIST_HEAD(&cache
->unknown
);
398 INIT_LIST_HEAD(&cache
->resolve
);
399 cache
->state
= DISCOVERY_STOPPED
;
402 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
404 struct discovery_state
*cache
= &hdev
->discovery
;
405 struct inquiry_entry
*e
;
407 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
409 list_for_each_entry(e
, &cache
->all
, all
) {
410 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
417 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
420 struct discovery_state
*cache
= &hdev
->discovery
;
421 struct inquiry_entry
*e
;
423 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
425 list_for_each_entry(e
, &cache
->unknown
, list
) {
426 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
433 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
437 struct discovery_state
*cache
= &hdev
->discovery
;
438 struct inquiry_entry
*e
;
440 BT_DBG("cache %p bdaddr %s state %d", cache
, batostr(bdaddr
), state
);
442 list_for_each_entry(e
, &cache
->resolve
, list
) {
443 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
445 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
452 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
453 struct inquiry_entry
*ie
)
455 struct discovery_state
*cache
= &hdev
->discovery
;
456 struct list_head
*pos
= &cache
->resolve
;
457 struct inquiry_entry
*p
;
461 list_for_each_entry(p
, &cache
->resolve
, list
) {
462 if (p
->name_state
!= NAME_PENDING
&&
463 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
468 list_add(&ie
->list
, pos
);
471 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
474 struct discovery_state
*cache
= &hdev
->discovery
;
475 struct inquiry_entry
*ie
;
477 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
479 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
481 if (ie
->name_state
== NAME_NEEDED
&&
482 data
->rssi
!= ie
->data
.rssi
) {
483 ie
->data
.rssi
= data
->rssi
;
484 hci_inquiry_cache_update_resolve(hdev
, ie
);
490 /* Entry not in the cache. Add new one. */
491 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
495 list_add(&ie
->all
, &cache
->all
);
498 ie
->name_state
= NAME_KNOWN
;
500 ie
->name_state
= NAME_NOT_KNOWN
;
501 list_add(&ie
->list
, &cache
->unknown
);
505 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
506 ie
->name_state
!= NAME_PENDING
) {
507 ie
->name_state
= NAME_KNOWN
;
511 memcpy(&ie
->data
, data
, sizeof(*data
));
512 ie
->timestamp
= jiffies
;
513 cache
->timestamp
= jiffies
;
515 if (ie
->name_state
== NAME_NOT_KNOWN
)
521 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
523 struct discovery_state
*cache
= &hdev
->discovery
;
524 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
525 struct inquiry_entry
*e
;
528 list_for_each_entry(e
, &cache
->all
, all
) {
529 struct inquiry_data
*data
= &e
->data
;
534 bacpy(&info
->bdaddr
, &data
->bdaddr
);
535 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
536 info
->pscan_period_mode
= data
->pscan_period_mode
;
537 info
->pscan_mode
= data
->pscan_mode
;
538 memcpy(info
->dev_class
, data
->dev_class
, 3);
539 info
->clock_offset
= data
->clock_offset
;
545 BT_DBG("cache %p, copied %d", cache
, copied
);
549 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
551 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
552 struct hci_cp_inquiry cp
;
554 BT_DBG("%s", hdev
->name
);
556 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
560 memcpy(&cp
.lap
, &ir
->lap
, 3);
561 cp
.length
= ir
->length
;
562 cp
.num_rsp
= ir
->num_rsp
;
563 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
566 int hci_inquiry(void __user
*arg
)
568 __u8 __user
*ptr
= arg
;
569 struct hci_inquiry_req ir
;
570 struct hci_dev
*hdev
;
571 int err
= 0, do_inquiry
= 0, max_rsp
;
575 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
578 hdev
= hci_dev_get(ir
.dev_id
);
583 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
584 inquiry_cache_empty(hdev
) ||
585 ir
.flags
& IREQ_CACHE_FLUSH
) {
586 inquiry_cache_flush(hdev
);
589 hci_dev_unlock(hdev
);
591 timeo
= ir
.length
* msecs_to_jiffies(2000);
594 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
599 /* for unlimited number of responses we will use buffer with 255 entries */
600 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
602 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
603 * copy it to the user space.
605 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
612 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
613 hci_dev_unlock(hdev
);
615 BT_DBG("num_rsp %d", ir
.num_rsp
);
617 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
619 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
632 /* ---- HCI ioctl helpers ---- */
634 int hci_dev_open(__u16 dev
)
636 struct hci_dev
*hdev
;
639 hdev
= hci_dev_get(dev
);
643 BT_DBG("%s %p", hdev
->name
, hdev
);
647 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
652 if (test_bit(HCI_UP
, &hdev
->flags
)) {
657 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
658 set_bit(HCI_RAW
, &hdev
->flags
);
660 /* Treat all non BR/EDR controllers as raw devices if
661 enable_hs is not set */
662 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
663 set_bit(HCI_RAW
, &hdev
->flags
);
665 if (hdev
->open(hdev
)) {
670 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
671 atomic_set(&hdev
->cmd_cnt
, 1);
672 set_bit(HCI_INIT
, &hdev
->flags
);
673 hdev
->init_last_cmd
= 0;
675 ret
= __hci_request(hdev
, hci_init_req
, 0,
676 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
678 if (lmp_host_le_capable(hdev
))
679 ret
= __hci_request(hdev
, hci_le_init_req
, 0,
680 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
682 clear_bit(HCI_INIT
, &hdev
->flags
);
687 set_bit(HCI_UP
, &hdev
->flags
);
688 hci_notify(hdev
, HCI_DEV_UP
);
689 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
691 mgmt_powered(hdev
, 1);
692 hci_dev_unlock(hdev
);
695 /* Init failed, cleanup */
696 flush_work(&hdev
->tx_work
);
697 flush_work(&hdev
->cmd_work
);
698 flush_work(&hdev
->rx_work
);
700 skb_queue_purge(&hdev
->cmd_q
);
701 skb_queue_purge(&hdev
->rx_q
);
706 if (hdev
->sent_cmd
) {
707 kfree_skb(hdev
->sent_cmd
);
708 hdev
->sent_cmd
= NULL
;
716 hci_req_unlock(hdev
);
721 static int hci_dev_do_close(struct hci_dev
*hdev
)
723 BT_DBG("%s %p", hdev
->name
, hdev
);
725 cancel_work_sync(&hdev
->le_scan
);
727 hci_req_cancel(hdev
, ENODEV
);
730 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
731 del_timer_sync(&hdev
->cmd_timer
);
732 hci_req_unlock(hdev
);
736 /* Flush RX and TX works */
737 flush_work(&hdev
->tx_work
);
738 flush_work(&hdev
->rx_work
);
740 if (hdev
->discov_timeout
> 0) {
741 cancel_delayed_work(&hdev
->discov_off
);
742 hdev
->discov_timeout
= 0;
745 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
746 cancel_delayed_work(&hdev
->power_off
);
748 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
749 cancel_delayed_work(&hdev
->service_cache
);
751 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
754 inquiry_cache_flush(hdev
);
755 hci_conn_hash_flush(hdev
);
756 hci_dev_unlock(hdev
);
758 hci_notify(hdev
, HCI_DEV_DOWN
);
764 skb_queue_purge(&hdev
->cmd_q
);
765 atomic_set(&hdev
->cmd_cnt
, 1);
766 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
767 test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
)) {
768 set_bit(HCI_INIT
, &hdev
->flags
);
769 __hci_request(hdev
, hci_reset_req
, 0,
770 msecs_to_jiffies(250));
771 clear_bit(HCI_INIT
, &hdev
->flags
);
775 flush_work(&hdev
->cmd_work
);
778 skb_queue_purge(&hdev
->rx_q
);
779 skb_queue_purge(&hdev
->cmd_q
);
780 skb_queue_purge(&hdev
->raw_q
);
782 /* Drop last sent command */
783 if (hdev
->sent_cmd
) {
784 del_timer_sync(&hdev
->cmd_timer
);
785 kfree_skb(hdev
->sent_cmd
);
786 hdev
->sent_cmd
= NULL
;
789 /* After this point our queues are empty
790 * and no tasks are scheduled. */
794 mgmt_powered(hdev
, 0);
795 hci_dev_unlock(hdev
);
800 hci_req_unlock(hdev
);
806 int hci_dev_close(__u16 dev
)
808 struct hci_dev
*hdev
;
811 hdev
= hci_dev_get(dev
);
814 err
= hci_dev_do_close(hdev
);
819 int hci_dev_reset(__u16 dev
)
821 struct hci_dev
*hdev
;
824 hdev
= hci_dev_get(dev
);
830 if (!test_bit(HCI_UP
, &hdev
->flags
))
834 skb_queue_purge(&hdev
->rx_q
);
835 skb_queue_purge(&hdev
->cmd_q
);
838 inquiry_cache_flush(hdev
);
839 hci_conn_hash_flush(hdev
);
840 hci_dev_unlock(hdev
);
845 atomic_set(&hdev
->cmd_cnt
, 1);
846 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
848 if (!test_bit(HCI_RAW
, &hdev
->flags
))
849 ret
= __hci_request(hdev
, hci_reset_req
, 0,
850 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
853 hci_req_unlock(hdev
);
858 int hci_dev_reset_stat(__u16 dev
)
860 struct hci_dev
*hdev
;
863 hdev
= hci_dev_get(dev
);
867 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
874 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
876 struct hci_dev
*hdev
;
877 struct hci_dev_req dr
;
880 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
883 hdev
= hci_dev_get(dr
.dev_id
);
889 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
890 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
894 if (!lmp_encrypt_capable(hdev
)) {
899 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
900 /* Auth must be enabled first */
901 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
902 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
907 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
908 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
912 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
913 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
917 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
918 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
922 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
923 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
927 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
931 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
932 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
936 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
937 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
949 int hci_get_dev_list(void __user
*arg
)
951 struct hci_dev
*hdev
;
952 struct hci_dev_list_req
*dl
;
953 struct hci_dev_req
*dr
;
954 int n
= 0, size
, err
;
957 if (get_user(dev_num
, (__u16 __user
*) arg
))
960 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
963 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
965 dl
= kzalloc(size
, GFP_KERNEL
);
971 read_lock(&hci_dev_list_lock
);
972 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
973 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
974 cancel_delayed_work(&hdev
->power_off
);
976 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
977 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
979 (dr
+ n
)->dev_id
= hdev
->id
;
980 (dr
+ n
)->dev_opt
= hdev
->flags
;
985 read_unlock(&hci_dev_list_lock
);
988 size
= sizeof(*dl
) + n
* sizeof(*dr
);
990 err
= copy_to_user(arg
, dl
, size
);
993 return err
? -EFAULT
: 0;
996 int hci_get_dev_info(void __user
*arg
)
998 struct hci_dev
*hdev
;
999 struct hci_dev_info di
;
1002 if (copy_from_user(&di
, arg
, sizeof(di
)))
1005 hdev
= hci_dev_get(di
.dev_id
);
1009 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1010 cancel_delayed_work_sync(&hdev
->power_off
);
1012 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1013 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1015 strcpy(di
.name
, hdev
->name
);
1016 di
.bdaddr
= hdev
->bdaddr
;
1017 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1018 di
.flags
= hdev
->flags
;
1019 di
.pkt_type
= hdev
->pkt_type
;
1020 di
.acl_mtu
= hdev
->acl_mtu
;
1021 di
.acl_pkts
= hdev
->acl_pkts
;
1022 di
.sco_mtu
= hdev
->sco_mtu
;
1023 di
.sco_pkts
= hdev
->sco_pkts
;
1024 di
.link_policy
= hdev
->link_policy
;
1025 di
.link_mode
= hdev
->link_mode
;
1027 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1028 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1030 if (copy_to_user(arg
, &di
, sizeof(di
)))
1038 /* ---- Interface to HCI drivers ---- */
1040 static int hci_rfkill_set_block(void *data
, bool blocked
)
1042 struct hci_dev
*hdev
= data
;
1044 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1049 hci_dev_do_close(hdev
);
1054 static const struct rfkill_ops hci_rfkill_ops
= {
1055 .set_block
= hci_rfkill_set_block
,
1058 /* Alloc HCI device */
1059 struct hci_dev
*hci_alloc_dev(void)
1061 struct hci_dev
*hdev
;
1063 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1067 hci_init_sysfs(hdev
);
1068 skb_queue_head_init(&hdev
->driver_init
);
1072 EXPORT_SYMBOL(hci_alloc_dev
);
1074 /* Free HCI device */
1075 void hci_free_dev(struct hci_dev
*hdev
)
1077 skb_queue_purge(&hdev
->driver_init
);
1079 /* will free via device release */
1080 put_device(&hdev
->dev
);
1082 EXPORT_SYMBOL(hci_free_dev
);
1084 static void hci_power_on(struct work_struct
*work
)
1086 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1088 BT_DBG("%s", hdev
->name
);
1090 if (hci_dev_open(hdev
->id
) < 0)
1093 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1094 schedule_delayed_work(&hdev
->power_off
,
1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT
));
1097 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1098 mgmt_index_added(hdev
);
1101 static void hci_power_off(struct work_struct
*work
)
1103 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1106 BT_DBG("%s", hdev
->name
);
1108 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1110 hci_dev_close(hdev
->id
);
1113 static void hci_discov_off(struct work_struct
*work
)
1115 struct hci_dev
*hdev
;
1116 u8 scan
= SCAN_PAGE
;
1118 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1120 BT_DBG("%s", hdev
->name
);
1124 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1126 hdev
->discov_timeout
= 0;
1128 hci_dev_unlock(hdev
);
1131 int hci_uuids_clear(struct hci_dev
*hdev
)
1133 struct list_head
*p
, *n
;
1135 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1136 struct bt_uuid
*uuid
;
1138 uuid
= list_entry(p
, struct bt_uuid
, list
);
1147 int hci_link_keys_clear(struct hci_dev
*hdev
)
1149 struct list_head
*p
, *n
;
1151 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1152 struct link_key
*key
;
1154 key
= list_entry(p
, struct link_key
, list
);
1163 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1165 struct smp_ltk
*k
, *tmp
;
1167 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1175 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1179 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1180 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1186 static int hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1187 u8 key_type
, u8 old_key_type
)
1190 if (key_type
< 0x03)
1193 /* Debug keys are insecure so don't store them persistently */
1194 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1197 /* Changed combination key and there's no previous one */
1198 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1201 /* Security mode 3 case */
1205 /* Neither local nor remote side had no-bonding as requirement */
1206 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1209 /* Local side had dedicated bonding as requirement */
1210 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1213 /* Remote side had dedicated bonding as requirement */
1214 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1217 /* If none of the above criteria match, then don't store the key
1222 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1226 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1227 if (k
->ediv
!= ediv
||
1228 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1236 EXPORT_SYMBOL(hci_find_ltk
);
1238 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1243 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1244 if (addr_type
== k
->bdaddr_type
&&
1245 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1250 EXPORT_SYMBOL(hci_find_ltk_by_addr
);
1252 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1253 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1255 struct link_key
*key
, *old_key
;
1256 u8 old_key_type
, persistent
;
1258 old_key
= hci_find_link_key(hdev
, bdaddr
);
1260 old_key_type
= old_key
->type
;
1263 old_key_type
= conn
? conn
->key_type
: 0xff;
1264 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1267 list_add(&key
->list
, &hdev
->link_keys
);
1270 BT_DBG("%s key for %s type %u", hdev
->name
, batostr(bdaddr
), type
);
1272 /* Some buggy controller combinations generate a changed
1273 * combination key for legacy pairing even when there's no
1275 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1276 (!conn
|| conn
->remote_auth
== 0xff) &&
1277 old_key_type
== 0xff) {
1278 type
= HCI_LK_COMBINATION
;
1280 conn
->key_type
= type
;
1283 bacpy(&key
->bdaddr
, bdaddr
);
1284 memcpy(key
->val
, val
, 16);
1285 key
->pin_len
= pin_len
;
1287 if (type
== HCI_LK_CHANGED_COMBINATION
)
1288 key
->type
= old_key_type
;
1295 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1297 mgmt_new_link_key(hdev
, key
, persistent
);
1300 list_del(&key
->list
);
1307 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1308 int new_key
, u8 authenticated
, u8 tk
[16],
1309 u8 enc_size
, u16 ediv
, u8 rand
[8])
1311 struct smp_ltk
*key
, *old_key
;
1313 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1316 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1320 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1323 list_add(&key
->list
, &hdev
->long_term_keys
);
1326 bacpy(&key
->bdaddr
, bdaddr
);
1327 key
->bdaddr_type
= addr_type
;
1328 memcpy(key
->val
, tk
, sizeof(key
->val
));
1329 key
->authenticated
= authenticated
;
1331 key
->enc_size
= enc_size
;
1333 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1338 if (type
& HCI_SMP_LTK
)
1339 mgmt_new_ltk(hdev
, key
, 1);
1344 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1346 struct link_key
*key
;
1348 key
= hci_find_link_key(hdev
, bdaddr
);
1352 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1354 list_del(&key
->list
);
1360 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1362 struct smp_ltk
*k
, *tmp
;
1364 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1365 if (bacmp(bdaddr
, &k
->bdaddr
))
1368 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1377 /* HCI command timer function */
1378 static void hci_cmd_timer(unsigned long arg
)
1380 struct hci_dev
*hdev
= (void *) arg
;
1382 BT_ERR("%s command tx timeout", hdev
->name
);
1383 atomic_set(&hdev
->cmd_cnt
, 1);
1384 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1387 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1390 struct oob_data
*data
;
1392 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1393 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1399 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1401 struct oob_data
*data
;
1403 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1407 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1409 list_del(&data
->list
);
1415 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1417 struct oob_data
*data
, *n
;
1419 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1420 list_del(&data
->list
);
1427 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1430 struct oob_data
*data
;
1432 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1435 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1439 bacpy(&data
->bdaddr
, bdaddr
);
1440 list_add(&data
->list
, &hdev
->remote_oob_data
);
1443 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1444 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1446 BT_DBG("%s for %s", hdev
->name
, batostr(bdaddr
));
1451 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
,
1454 struct bdaddr_list
*b
;
1456 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1457 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1463 int hci_blacklist_clear(struct hci_dev
*hdev
)
1465 struct list_head
*p
, *n
;
1467 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1468 struct bdaddr_list
*b
;
1470 b
= list_entry(p
, struct bdaddr_list
, list
);
1479 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1481 struct bdaddr_list
*entry
;
1483 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1486 if (hci_blacklist_lookup(hdev
, bdaddr
))
1489 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1493 bacpy(&entry
->bdaddr
, bdaddr
);
1495 list_add(&entry
->list
, &hdev
->blacklist
);
1497 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1500 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1502 struct bdaddr_list
*entry
;
1504 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1505 return hci_blacklist_clear(hdev
);
1507 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1511 list_del(&entry
->list
);
1514 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1517 static void hci_clear_adv_cache(struct work_struct
*work
)
1519 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1524 hci_adv_entries_clear(hdev
);
1526 hci_dev_unlock(hdev
);
1529 int hci_adv_entries_clear(struct hci_dev
*hdev
)
1531 struct adv_entry
*entry
, *tmp
;
1533 list_for_each_entry_safe(entry
, tmp
, &hdev
->adv_entries
, list
) {
1534 list_del(&entry
->list
);
1538 BT_DBG("%s adv cache cleared", hdev
->name
);
1543 struct adv_entry
*hci_find_adv_entry(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1545 struct adv_entry
*entry
;
1547 list_for_each_entry(entry
, &hdev
->adv_entries
, list
)
1548 if (bacmp(bdaddr
, &entry
->bdaddr
) == 0)
1554 static inline int is_connectable_adv(u8 evt_type
)
1556 if (evt_type
== ADV_IND
|| evt_type
== ADV_DIRECT_IND
)
1562 int hci_add_adv_entry(struct hci_dev
*hdev
,
1563 struct hci_ev_le_advertising_info
*ev
)
1565 struct adv_entry
*entry
;
1567 if (!is_connectable_adv(ev
->evt_type
))
1570 /* Only new entries should be added to adv_entries. So, if
1571 * bdaddr was found, don't add it. */
1572 if (hci_find_adv_entry(hdev
, &ev
->bdaddr
))
1575 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
1579 bacpy(&entry
->bdaddr
, &ev
->bdaddr
);
1580 entry
->bdaddr_type
= ev
->bdaddr_type
;
1582 list_add(&entry
->list
, &hdev
->adv_entries
);
1584 BT_DBG("%s adv entry added: address %s type %u", hdev
->name
,
1585 batostr(&entry
->bdaddr
), entry
->bdaddr_type
);
1590 static void le_scan_param_req(struct hci_dev
*hdev
, unsigned long opt
)
1592 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1593 struct hci_cp_le_set_scan_param cp
;
1595 memset(&cp
, 0, sizeof(cp
));
1596 cp
.type
= param
->type
;
1597 cp
.interval
= cpu_to_le16(param
->interval
);
1598 cp
.window
= cpu_to_le16(param
->window
);
1600 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
/* __hci_request callback: send HCI_OP_LE_SET_SCAN_ENABLE.
 * NOTE(review): the line that sets cp.enable (original line ~1608) is
 * missing from this lossy extraction; as shown the memset alone would
 * send enable=0 — confirm against upstream before assuming behavior. */
1603 static void le_scan_enable_req(struct hci_dev
*hdev
, unsigned long opt
)
1605 struct hci_cp_le_set_scan_enable cp
;
1607 memset(&cp
, 0, sizeof(cp
));
1610 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1613 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1614 u16 window
, int timeout
)
1616 long timeo
= msecs_to_jiffies(3000);
1617 struct le_scan_params param
;
1620 BT_DBG("%s", hdev
->name
);
1622 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1623 return -EINPROGRESS
;
1626 param
.interval
= interval
;
1627 param
.window
= window
;
1631 err
= __hci_request(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1634 err
= __hci_request(hdev
, le_scan_enable_req
, 0, timeo
);
1636 hci_req_unlock(hdev
);
1641 schedule_delayed_work(&hdev
->le_scan_disable
,
1642 msecs_to_jiffies(timeout
));
/* Delayed-work handler that turns LE scanning off after the timeout set
 * in hci_do_le_scan(): the zeroed command block carries enable = 0. */
1647 static void le_scan_disable_work(struct work_struct
*work
)
1649 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1650 le_scan_disable
.work
);
1651 struct hci_cp_le_set_scan_enable cp
;
1653 BT_DBG("%s", hdev
->name
);
/* All-zero parameters == "disable scanning". */
1655 memset(&cp
, 0, sizeof(cp
));
1657 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
/* Work handler queued by hci_le_scan(): runs the blocking scan sequence
 * (hci_do_le_scan) in process context using the parameters that
 * hci_le_scan() stored in hdev->le_scan_params. */
1660 static void le_scan_work(struct work_struct
*work
)
1662 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1663 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1665 BT_DBG("%s", hdev
->name
);
1667 hci_do_le_scan(hdev
, param
->type
, param
->interval
,
1668 param
->window
, param
->timeout
);
/* Public entry point to start an LE scan. Non-blocking: stashes the
 * parameters in hdev->le_scan_params and defers the actual (blocking)
 * scan to le_scan_work on system_long_wq. Returns -EINPROGRESS when a
 * scan work item is already pending or running.
 * NOTE(review): the signature is split across missing lines here; the
 * 'timeout' parameter and param->type assignment are implied by the body
 * but not visible — confirm against upstream. */
1671 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1674 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1676 BT_DBG("%s", hdev
->name
);
/* Reject overlapping scan requests. */
1678 if (work_busy(&hdev
->le_scan
))
1679 return -EINPROGRESS
;
1682 param
->interval
= interval
;
1683 param
->window
= window
;
1684 param
->timeout
= timeout
;
/* system_long_wq: the scan work can block for the full scan duration. */
1686 queue_work(system_long_wq
, &hdev
->le_scan
);
1691 /* Register HCI device */
1692 int hci_register_dev(struct hci_dev
*hdev
)
1694 struct list_head
*head
= &hci_dev_list
, *p
;
1697 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1699 if (!hdev
->open
|| !hdev
->close
)
1702 /* Do not allow HCI_AMP devices to register at index 0,
1703 * so the index can be used as the AMP controller ID.
1705 id
= (hdev
->dev_type
== HCI_BREDR
) ? 0 : 1;
1707 write_lock(&hci_dev_list_lock
);
1709 /* Find first available device id */
1710 list_for_each(p
, &hci_dev_list
) {
1711 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
1716 sprintf(hdev
->name
, "hci%d", id
);
1718 list_add_tail(&hdev
->list
, head
);
1720 mutex_init(&hdev
->lock
);
1723 hdev
->dev_flags
= 0;
1724 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1725 hdev
->esco_type
= (ESCO_HV1
);
1726 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1727 hdev
->io_capability
= 0x03; /* No Input No Output */
1729 hdev
->idle_timeout
= 0;
1730 hdev
->sniff_max_interval
= 800;
1731 hdev
->sniff_min_interval
= 80;
1733 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1734 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1735 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1738 skb_queue_head_init(&hdev
->rx_q
);
1739 skb_queue_head_init(&hdev
->cmd_q
);
1740 skb_queue_head_init(&hdev
->raw_q
);
1742 setup_timer(&hdev
->cmd_timer
, hci_cmd_timer
, (unsigned long) hdev
);
1744 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1745 hdev
->reassembly
[i
] = NULL
;
1747 init_waitqueue_head(&hdev
->req_wait_q
);
1748 mutex_init(&hdev
->req_lock
);
1750 discovery_init(hdev
);
1752 hci_conn_hash_init(hdev
);
1754 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1756 INIT_LIST_HEAD(&hdev
->blacklist
);
1758 INIT_LIST_HEAD(&hdev
->uuids
);
1760 INIT_LIST_HEAD(&hdev
->link_keys
);
1761 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1763 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1765 INIT_LIST_HEAD(&hdev
->adv_entries
);
1767 INIT_DELAYED_WORK(&hdev
->adv_work
, hci_clear_adv_cache
);
1768 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1769 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1771 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1773 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1775 atomic_set(&hdev
->promisc
, 0);
1777 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
1779 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
1781 write_unlock(&hci_dev_list_lock
);
1783 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1785 if (!hdev
->workqueue
) {
1790 error
= hci_add_sysfs(hdev
);
1794 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1795 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
1797 if (rfkill_register(hdev
->rfkill
) < 0) {
1798 rfkill_destroy(hdev
->rfkill
);
1799 hdev
->rfkill
= NULL
;
1803 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1804 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
1805 schedule_work(&hdev
->power_on
);
1807 hci_notify(hdev
, HCI_DEV_REG
);
1813 destroy_workqueue(hdev
->workqueue
);
1815 write_lock(&hci_dev_list_lock
);
1816 list_del(&hdev
->list
);
1817 write_unlock(&hci_dev_list_lock
);
1821 EXPORT_SYMBOL(hci_register_dev
);
1823 /* Unregister HCI device */
1824 void hci_unregister_dev(struct hci_dev
*hdev
)
1828 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1830 write_lock(&hci_dev_list_lock
);
1831 list_del(&hdev
->list
);
1832 write_unlock(&hci_dev_list_lock
);
1834 hci_dev_do_close(hdev
);
1836 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1837 kfree_skb(hdev
->reassembly
[i
]);
1839 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1840 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1842 mgmt_index_removed(hdev
);
1843 hci_dev_unlock(hdev
);
1846 /* mgmt_index_removed should take care of emptying the
1848 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1850 hci_notify(hdev
, HCI_DEV_UNREG
);
1853 rfkill_unregister(hdev
->rfkill
);
1854 rfkill_destroy(hdev
->rfkill
);
1857 hci_del_sysfs(hdev
);
1859 cancel_delayed_work_sync(&hdev
->adv_work
);
1861 destroy_workqueue(hdev
->workqueue
);
1864 hci_blacklist_clear(hdev
);
1865 hci_uuids_clear(hdev
);
1866 hci_link_keys_clear(hdev
);
1867 hci_smp_ltks_clear(hdev
);
1868 hci_remote_oob_data_clear(hdev
);
1869 hci_adv_entries_clear(hdev
);
1870 hci_dev_unlock(hdev
);
1874 EXPORT_SYMBOL(hci_unregister_dev
);
1876 /* Suspend HCI device */
/* Notify registered listeners that the device is suspending; no other
 * state is touched here. */
1877 int hci_suspend_dev(struct hci_dev
*hdev
)
1879 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1882 EXPORT_SYMBOL(hci_suspend_dev
);
1884 /* Resume HCI device */
/* Mirror of hci_suspend_dev(): broadcast HCI_DEV_RESUME to listeners. */
1885 int hci_resume_dev(struct hci_dev
*hdev
)
1887 hci_notify(hdev
, HCI_DEV_RESUME
);
1890 EXPORT_SYMBOL(hci_resume_dev
);
1892 /* Receive frame from HCI drivers */
/* Driver-facing RX entry point: validates device state, timestamps and
 * marks the skb as incoming, then queues it on hdev->rx_q for deferred
 * processing by hci_rx_work on the device workqueue.
 * Frames are dropped unless the device is HCI_UP or mid-HCI_INIT. */
1893 int hci_recv_frame(struct sk_buff
*skb
)
1895 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1896 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1897 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
/* Tag direction for the HCI socket/monitor layer. */
1903 bt_cb(skb
)->incoming
= 1;
1906 __net_timestamp(skb
);
1908 skb_queue_tail(&hdev
->rx_q
, skb
);
1909 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1913 EXPORT_SYMBOL(hci_recv_frame
);
1915 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1916 int count
, __u8 index
)
1921 struct sk_buff
*skb
;
1922 struct bt_skb_cb
*scb
;
1924 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1925 index
>= NUM_REASSEMBLY
)
1928 skb
= hdev
->reassembly
[index
];
1932 case HCI_ACLDATA_PKT
:
1933 len
= HCI_MAX_FRAME_SIZE
;
1934 hlen
= HCI_ACL_HDR_SIZE
;
1937 len
= HCI_MAX_EVENT_SIZE
;
1938 hlen
= HCI_EVENT_HDR_SIZE
;
1940 case HCI_SCODATA_PKT
:
1941 len
= HCI_MAX_SCO_SIZE
;
1942 hlen
= HCI_SCO_HDR_SIZE
;
1946 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1950 scb
= (void *) skb
->cb
;
1952 scb
->pkt_type
= type
;
1954 skb
->dev
= (void *) hdev
;
1955 hdev
->reassembly
[index
] = skb
;
1959 scb
= (void *) skb
->cb
;
1960 len
= min(scb
->expect
, (__u16
)count
);
1962 memcpy(skb_put(skb
, len
), data
, len
);
1971 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1972 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1973 scb
->expect
= h
->plen
;
1975 if (skb_tailroom(skb
) < scb
->expect
) {
1977 hdev
->reassembly
[index
] = NULL
;
1983 case HCI_ACLDATA_PKT
:
1984 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1985 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1986 scb
->expect
= __le16_to_cpu(h
->dlen
);
1988 if (skb_tailroom(skb
) < scb
->expect
) {
1990 hdev
->reassembly
[index
] = NULL
;
1996 case HCI_SCODATA_PKT
:
1997 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1998 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1999 scb
->expect
= h
->dlen
;
2001 if (skb_tailroom(skb
) < scb
->expect
) {
2003 hdev
->reassembly
[index
] = NULL
;
2010 if (scb
->expect
== 0) {
2011 /* Complete frame */
2013 bt_cb(skb
)->pkt_type
= type
;
2014 hci_recv_frame(skb
);
2016 hdev
->reassembly
[index
] = NULL
;
/* Driver helper: feed a fragment of a typed HCI packet into the
 * per-type reassembly slot (index = type - 1). Rejects types outside
 * the ACL..EVENT range.
 * NOTE(review): the loop around hci_reassembly() and the error/return
 * paths are missing from this lossy extraction. */
2024 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
2028 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
2032 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
/* Advance past the bytes hci_reassembly() consumed. */
2036 data
+= (count
- rem
);
2042 EXPORT_SYMBOL(hci_recv_fragment
);
/* Reassembly slot reserved for byte-stream transports (e.g. UART) where
 * the packet-type indicator arrives in-band as the first byte. */
2044 #define STREAM_REASSEMBLY 0
/* Feed raw stream bytes into the stream reassembly slot. When no packet
 * is in progress, the first byte of the frame carries the HCI packet
 * type; otherwise the in-progress skb's stored type is reused.
 * NOTE(review): the new-frame branch that reads pkt->type and several
 * control-flow lines are missing from this lossy extraction. */
2046 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
2052 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
2055 struct { char type
; } *pkt
;
2057 /* Start of the frame */
/* Continuation: reuse the type recorded on the partial skb. */
2064 type
= bt_cb(skb
)->pkt_type
;
2066 rem
= hci_reassembly(hdev
, type
, data
, count
,
2071 data
+= (count
- rem
);
2077 EXPORT_SYMBOL(hci_recv_stream_fragment
);
2079 /* ---- Interface to upper protocols ---- */
/* Add an upper-layer callback set to the global hci_cb_list, guarded by
 * the hci_cb_list_lock rwlock. */
2081 int hci_register_cb(struct hci_cb
*cb
)
2083 BT_DBG("%p name %s", cb
, cb
->name
);
2085 write_lock(&hci_cb_list_lock
);
2086 list_add(&cb
->list
, &hci_cb_list
);
2087 write_unlock(&hci_cb_list_lock
);
2091 EXPORT_SYMBOL(hci_register_cb
);
/* Remove an upper-layer callback set from hci_cb_list; exact inverse of
 * hci_register_cb(), taking the same write lock. */
2093 int hci_unregister_cb(struct hci_cb
*cb
)
2095 BT_DBG("%p name %s", cb
, cb
->name
);
2097 write_lock(&hci_cb_list_lock
);
2098 list_del(&cb
->list
);
2099 write_unlock(&hci_cb_list_lock
);
2103 EXPORT_SYMBOL(hci_unregister_cb
);
/* Hand one fully-built skb to the transport driver via hdev->send().
 * When promiscuous (monitor) listeners exist, a timestamped copy is sent
 * to the HCI sockets first.
 * NOTE(review): the !hdev error branch and the skb copy made for
 * hci_send_to_sock are not visible in this lossy extraction. */
2105 static int hci_send_frame(struct sk_buff
*skb
)
2107 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2114 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2116 if (atomic_read(&hdev
->promisc
)) {
2118 __net_timestamp(skb
);
2120 hci_send_to_sock(hdev
, skb
);
2123 /* Get rid of skb owner, prior to sending to the driver. */
2126 return hdev
->send(skb
);
2129 /* Send HCI command */
2130 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2132 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2133 struct hci_command_hdr
*hdr
;
2134 struct sk_buff
*skb
;
2136 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
2138 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2140 BT_ERR("%s no memory for command", hdev
->name
);
2144 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2145 hdr
->opcode
= cpu_to_le16(opcode
);
2149 memcpy(skb_put(skb
, plen
), param
, plen
);
2151 BT_DBG("skb len %d", skb
->len
);
2153 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2154 skb
->dev
= (void *) hdev
;
2156 if (test_bit(HCI_INIT
, &hdev
->flags
))
2157 hdev
->init_last_cmd
= opcode
;
2159 skb_queue_tail(&hdev
->cmd_q
, skb
);
2160 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2165 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent
 * (hdev->sent_cmd), but only if its opcode matches the one requested;
 * used by event handlers to recover the originating command's params. */
2166 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2168 struct hci_command_hdr
*hdr
;
/* No command outstanding. */
2170 if (!hdev
->sent_cmd
)
2173 hdr
= (void *) hdev
->sent_cmd
->data
;
/* Opcode on the wire is little-endian; compare in wire order. */
2175 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2178 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
/* Parameters start right after the 3-byte command header. */
2180 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
/* Prepend an ACL data header to skb: packs the connection handle and
 * packet-boundary/broadcast flags into one little-endian 16-bit field
 * and records the payload length.
 * NOTE(review): the local that captures skb->len into 'len' before the
 * skb_push() is missing from this lossy extraction — 'len' appears here
 * without a visible definition; confirm against upstream. */
2184 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2186 struct hci_acl_hdr
*hdr
;
2189 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2190 skb_reset_transport_header(skb
);
2191 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2192 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2193 hdr
->dlen
= cpu_to_le16(len
);
2196 static void hci_queue_acl(struct hci_conn
*conn
, struct sk_buff_head
*queue
,
2197 struct sk_buff
*skb
, __u16 flags
)
2199 struct hci_dev
*hdev
= conn
->hdev
;
2200 struct sk_buff
*list
;
2202 list
= skb_shinfo(skb
)->frag_list
;
2204 /* Non fragmented */
2205 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2207 skb_queue_tail(queue
, skb
);
2210 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2212 skb_shinfo(skb
)->frag_list
= NULL
;
2214 /* Queue all fragments atomically */
2215 spin_lock(&queue
->lock
);
2217 __skb_queue_tail(queue
, skb
);
2219 flags
&= ~ACL_START
;
2222 skb
= list
; list
= list
->next
;
2224 skb
->dev
= (void *) hdev
;
2225 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2226 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2228 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2230 __skb_queue_tail(queue
, skb
);
2233 spin_unlock(&queue
->lock
);
/* Queue one ACL data skb on a channel's data queue and kick the TX work.
 * The skb gets its owning hdev, the ACL packet type and the ACL header
 * (handle + flags) attached before queuing; fragmentation, if any, is
 * handled inside hci_queue_acl(). */
2237 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2239 struct hci_conn
*conn
= chan
->conn
;
2240 struct hci_dev
*hdev
= conn
->hdev
;
2242 BT_DBG("%s chan %p flags 0x%x", hdev
->name
, chan
, flags
);
2244 skb
->dev
= (void *) hdev
;
2245 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2246 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2248 hci_queue_acl(conn
, &chan
->data_q
, skb
, flags
);
/* Defer actual transmission to hci_tx_work on the device workqueue. */
2250 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2252 EXPORT_SYMBOL(hci_send_acl
);
/* Queue one SCO data skb for transmission: build the SCO header on the
 * stack (handle little-endian, dlen as-is since it is a single byte
 * count field), push it in front of the payload, tag the skb and hand it
 * to the connection's data queue for hci_tx_work. */
2255 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2257 struct hci_dev
*hdev
= conn
->hdev
;
2258 struct hci_sco_hdr hdr
;
2260 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2262 hdr
.handle
= cpu_to_le16(conn
->handle
);
2263 hdr
.dlen
= skb
->len
;
/* Make room in the headroom and copy the prepared header in. */
2265 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2266 skb_reset_transport_header(skb
);
2267 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2269 skb
->dev
= (void *) hdev
;
2270 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2272 skb_queue_tail(&conn
->data_q
, skb
);
2273 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2275 EXPORT_SYMBOL(hci_send_sco
);
2277 /* ---- HCI TX task (outgoing data) ---- */
2279 /* HCI Connection scheduler */
2280 static inline struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
, int *quote
)
2282 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2283 struct hci_conn
*conn
= NULL
, *c
;
2284 int num
= 0, min
= ~0;
2286 /* We don't have to lock device here. Connections are always
2287 * added and removed with TX task disabled. */
2291 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2292 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2295 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2300 if (c
->sent
< min
) {
2305 if (hci_conn_num(hdev
, type
) == num
)
2314 switch (conn
->type
) {
2316 cnt
= hdev
->acl_cnt
;
2320 cnt
= hdev
->sco_cnt
;
2323 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2327 BT_ERR("Unknown link type");
2335 BT_DBG("conn %p quote %d", conn
, *quote
);
/* Link TX timeout handler: disconnect every connection of the given link
 * type that still has unacknowledged packets (c->sent != 0), using
 * reason 0x13 (Remote User Terminated Connection).
 * NOTE(review): the rcu_read_lock/unlock pair implied by
 * list_for_each_entry_rcu and the declaration of 'c' are missing from
 * this lossy extraction. */
2339 static inline void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2341 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2344 BT_ERR("%s link tx timeout", hdev
->name
);
2348 /* Kill stalled connections */
2349 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2350 if (c
->type
== type
&& c
->sent
) {
2351 BT_ERR("%s killing stalled connection %s",
2352 hdev
->name
, batostr(&c
->dst
));
2353 hci_acl_disconn(c
, 0x13);
2360 static inline struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2363 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2364 struct hci_chan
*chan
= NULL
;
2365 int num
= 0, min
= ~0, cur_prio
= 0;
2366 struct hci_conn
*conn
;
2367 int cnt
, q
, conn_num
= 0;
2369 BT_DBG("%s", hdev
->name
);
2373 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2374 struct hci_chan
*tmp
;
2376 if (conn
->type
!= type
)
2379 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2384 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2385 struct sk_buff
*skb
;
2387 if (skb_queue_empty(&tmp
->data_q
))
2390 skb
= skb_peek(&tmp
->data_q
);
2391 if (skb
->priority
< cur_prio
)
2394 if (skb
->priority
> cur_prio
) {
2397 cur_prio
= skb
->priority
;
2402 if (conn
->sent
< min
) {
2408 if (hci_conn_num(hdev
, type
) == conn_num
)
2417 switch (chan
->conn
->type
) {
2419 cnt
= hdev
->acl_cnt
;
2423 cnt
= hdev
->sco_cnt
;
2426 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2430 BT_ERR("Unknown link type");
2435 BT_DBG("chan %p quote %d", chan
, *quote
);
2439 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2441 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2442 struct hci_conn
*conn
;
2445 BT_DBG("%s", hdev
->name
);
2449 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2450 struct hci_chan
*chan
;
2452 if (conn
->type
!= type
)
2455 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2460 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2461 struct sk_buff
*skb
;
2468 if (skb_queue_empty(&chan
->data_q
))
2471 skb
= skb_peek(&chan
->data_q
);
2472 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2475 skb
->priority
= HCI_PRIO_MAX
- 1;
2477 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2481 if (hci_conn_num(hdev
, type
) == num
)
/* Block-based flow control: number of controller buffer blocks consumed
 * by this ACL packet — payload bytes (len minus the ACL header) divided
 * by the controller's block length, rounded up. */
2489 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2491 /* Calculate count of blocks used by this packet */
2492 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
/* ACL scheduler guard: if the controller has reported no free buffers
 * (cnt == 0) for longer than HCI_ACL_TX_TIMEOUT since the last ACL
 * transmit, declare the ACL link stalled and tear down stuck
 * connections. Skipped entirely for raw-mode devices. */
2495 static inline void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2497 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2498 /* ACL tx timeout must be longer than maximum
2499 * link supervision timeout (40.9 seconds) */
2500 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2501 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT
)))
2502 hci_link_tx_to(hdev
, ACL_LINK
);
2506 static inline void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2508 unsigned int cnt
= hdev
->acl_cnt
;
2509 struct hci_chan
*chan
;
2510 struct sk_buff
*skb
;
2513 __check_timeout(hdev
, cnt
);
2515 while (hdev
->acl_cnt
&&
2516 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2517 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2518 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2519 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2520 skb
->len
, skb
->priority
);
2522 /* Stop if priority has changed */
2523 if (skb
->priority
< priority
)
2526 skb
= skb_dequeue(&chan
->data_q
);
2528 hci_conn_enter_active_mode(chan
->conn
,
2529 bt_cb(skb
)->force_active
);
2531 hci_send_frame(skb
);
2532 hdev
->acl_last_tx
= jiffies
;
2540 if (cnt
!= hdev
->acl_cnt
)
2541 hci_prio_recalculate(hdev
, ACL_LINK
);
2544 static inline void hci_sched_acl_blk(struct hci_dev
*hdev
)
2546 unsigned int cnt
= hdev
->block_cnt
;
2547 struct hci_chan
*chan
;
2548 struct sk_buff
*skb
;
2551 __check_timeout(hdev
, cnt
);
2553 while (hdev
->block_cnt
> 0 &&
2554 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2555 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2556 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2559 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2560 skb
->len
, skb
->priority
);
2562 /* Stop if priority has changed */
2563 if (skb
->priority
< priority
)
2566 skb
= skb_dequeue(&chan
->data_q
);
2568 blocks
= __get_blocks(hdev
, skb
);
2569 if (blocks
> hdev
->block_cnt
)
2572 hci_conn_enter_active_mode(chan
->conn
,
2573 bt_cb(skb
)->force_active
);
2575 hci_send_frame(skb
);
2576 hdev
->acl_last_tx
= jiffies
;
2578 hdev
->block_cnt
-= blocks
;
2581 chan
->sent
+= blocks
;
2582 chan
->conn
->sent
+= blocks
;
2586 if (cnt
!= hdev
->block_cnt
)
2587 hci_prio_recalculate(hdev
, ACL_LINK
);
/* ACL scheduling dispatcher: no-op when there are no ACL connections,
 * otherwise selects the packet-based or block-based scheduler according
 * to the controller's flow-control mode. */
2590 static inline void hci_sched_acl(struct hci_dev
*hdev
)
2592 BT_DBG("%s", hdev
->name
);
2594 if (!hci_conn_num(hdev
, ACL_LINK
))
2597 switch (hdev
->flow_ctl_mode
) {
2598 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
2599 hci_sched_acl_pkt(hdev
);
2602 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
2603 hci_sched_acl_blk(hdev
);
/* SCO scheduler: while SCO buffer credits remain, pick the least-busy
 * SCO connection (hci_low_sent) and drain up to 'quote' packets from its
 * data queue straight to the driver. A connection whose sent counter hit
 * the ~0 sentinel is reset (branch body not visible here).
 * NOTE(review): '"e' below is HTML-entity residue for '&quote'
 * (the &quote out-parameter) left by a lossy extraction — do not read it
 * as a string literal. */
2609 static inline void hci_sched_sco(struct hci_dev
*hdev
)
2611 struct hci_conn
*conn
;
2612 struct sk_buff
*skb
;
2615 BT_DBG("%s", hdev
->name
);
2617 if (!hci_conn_num(hdev
, SCO_LINK
))
2620 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2621 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2622 BT_DBG("skb %p len %d", skb
, skb
->len
);
2623 hci_send_frame(skb
);
2626 if (conn
->sent
== ~0)
/* eSCO scheduler: structurally identical to hci_sched_sco() but iterates
 * ESCO_LINK connections; note it shares the same sco_cnt credit pool.
 * NOTE(review): '"e' is extraction residue for the '&quote'
 * out-parameter, as in hci_sched_sco(). */
2632 static inline void hci_sched_esco(struct hci_dev
*hdev
)
2634 struct hci_conn
*conn
;
2635 struct sk_buff
*skb
;
2638 BT_DBG("%s", hdev
->name
);
2640 if (!hci_conn_num(hdev
, ESCO_LINK
))
2643 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
, "e
))) {
2644 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2645 BT_DBG("skb %p len %d", skb
, skb
->len
);
2646 hci_send_frame(skb
);
2649 if (conn
->sent
== ~0)
2655 static inline void hci_sched_le(struct hci_dev
*hdev
)
2657 struct hci_chan
*chan
;
2658 struct sk_buff
*skb
;
2659 int quote
, cnt
, tmp
;
2661 BT_DBG("%s", hdev
->name
);
2663 if (!hci_conn_num(hdev
, LE_LINK
))
2666 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2667 /* LE tx timeout must be longer than maximum
2668 * link supervision timeout (40.9 seconds) */
2669 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2670 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2671 hci_link_tx_to(hdev
, LE_LINK
);
2674 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2676 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2677 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2678 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2679 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2680 skb
->len
, skb
->priority
);
2682 /* Stop if priority has changed */
2683 if (skb
->priority
< priority
)
2686 skb
= skb_dequeue(&chan
->data_q
);
2688 hci_send_frame(skb
);
2689 hdev
->le_last_tx
= jiffies
;
2700 hdev
->acl_cnt
= cnt
;
2703 hci_prio_recalculate(hdev
, LE_LINK
);
/* TX work handler: runs the per-link-type schedulers (ACL, SCO, eSCO;
 * the LE scheduler call is not visible in this extraction), then flushes
 * any raw/unknown-type packets queued on hdev->raw_q directly to the
 * driver. */
2706 static void hci_tx_work(struct work_struct
*work
)
2708 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2709 struct sk_buff
*skb
;
2711 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2712 hdev
->sco_cnt
, hdev
->le_cnt
);
2714 /* Schedule queues and send stuff to HCI driver */
2716 hci_sched_acl(hdev
);
2718 hci_sched_sco(hdev
);
2720 hci_sched_esco(hdev
);
2724 /* Send next queued raw (unknown type) packet */
2725 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2726 hci_send_frame(skb
);
2729 /* ----- HCI RX task (incoming data processing) ----- */
2731 /* ACL data packet */
2732 static inline void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2734 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2735 struct hci_conn
*conn
;
2736 __u16 handle
, flags
;
2738 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2740 handle
= __le16_to_cpu(hdr
->handle
);
2741 flags
= hci_flags(handle
);
2742 handle
= hci_handle(handle
);
2744 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev
->name
, skb
->len
, handle
, flags
);
2746 hdev
->stat
.acl_rx
++;
2749 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2750 hci_dev_unlock(hdev
);
2753 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2755 /* Send to upper protocol */
2756 l2cap_recv_acldata(conn
, skb
, flags
);
2759 BT_ERR("%s ACL packet for unknown connection handle %d",
2760 hdev
->name
, handle
);
2766 /* SCO data packet */
/* RX path for SCO data: strip the SCO header, look up the connection by
 * handle under the device lock, and hand the payload to the SCO layer;
 * packets for unknown handles are logged and dropped (kfree_skb not
 * visible in this lossy extraction). */
2767 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2769 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2770 struct hci_conn
*conn
;
2773 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2775 handle
= __le16_to_cpu(hdr
->handle
);
2777 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
2779 hdev
->stat
.sco_rx
++;
2782 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2783 hci_dev_unlock(hdev
);
2786 /* Send to upper protocol */
2787 sco_recv_scodata(conn
, skb
);
2790 BT_ERR("%s SCO packet for unknown connection handle %d",
2791 hdev
->name
, handle
);
2797 static void hci_rx_work(struct work_struct
*work
)
2799 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2800 struct sk_buff
*skb
;
2802 BT_DBG("%s", hdev
->name
);
2804 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2805 if (atomic_read(&hdev
->promisc
)) {
2806 /* Send copy to the sockets */
2807 hci_send_to_sock(hdev
, skb
);
2810 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2815 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2816 /* Don't process data packets in this states. */
2817 switch (bt_cb(skb
)->pkt_type
) {
2818 case HCI_ACLDATA_PKT
:
2819 case HCI_SCODATA_PKT
:
2826 switch (bt_cb(skb
)->pkt_type
) {
2828 BT_DBG("%s Event packet", hdev
->name
);
2829 hci_event_packet(hdev
, skb
);
2832 case HCI_ACLDATA_PKT
:
2833 BT_DBG("%s ACL data packet", hdev
->name
);
2834 hci_acldata_packet(hdev
, skb
);
2837 case HCI_SCODATA_PKT
:
2838 BT_DBG("%s SCO data packet", hdev
->name
);
2839 hci_scodata_packet(hdev
, skb
);
2849 static void hci_cmd_work(struct work_struct
*work
)
2851 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2852 struct sk_buff
*skb
;
2854 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
2856 /* Send queued commands */
2857 if (atomic_read(&hdev
->cmd_cnt
)) {
2858 skb
= skb_dequeue(&hdev
->cmd_q
);
2862 kfree_skb(hdev
->sent_cmd
);
2864 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2865 if (hdev
->sent_cmd
) {
2866 atomic_dec(&hdev
->cmd_cnt
);
2867 hci_send_frame(skb
);
2868 if (test_bit(HCI_RESET
, &hdev
->flags
))
2869 del_timer(&hdev
->cmd_timer
);
2871 mod_timer(&hdev
->cmd_timer
,
2872 jiffies
+ msecs_to_jiffies(HCI_CMD_TIMEOUT
));
2874 skb_queue_head(&hdev
->cmd_q
, skb
);
2875 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
/* Start a classic BR/EDR inquiry using the General Inquiry Access Code.
 * Refuses with -EINPROGRESS while an inquiry is already running, flushes
 * the stale inquiry cache, then issues HCI_OP_INQUIRY. The 'length'
 * parameter's assignment into cp is not visible in this extraction. */
2880 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2882 /* General inquiry access code (GIAC) */
2883 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2884 struct hci_cp_inquiry cp
;
2886 BT_DBG("%s", hdev
->name
);
2888 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2889 return -EINPROGRESS
;
/* Old discovery results must not leak into the new scan. */
2891 inquiry_cache_flush(hdev
);
2893 memset(&cp
, 0, sizeof(cp
));
2894 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2897 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
/* Abort a running inquiry with HCI_OP_INQUIRY_CANCEL (no parameters).
 * When no inquiry is active the function returns early (return value on
 * that path is not visible in this extraction). */
2900 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2902 BT_DBG("%s", hdev
->name
);
2904 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2907 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
/* Module parameter: runtime toggle for Bluetooth High Speed (AMP)
 * support, writable by root (mode 0644) via sysfs. */
2910 module_param(enable_hs
, bool, 0644);
2911 MODULE_PARM_DESC(enable_hs
, "Enable High Speed");