/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000
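/* AUTO_OFF_TIMEOUT is in milliseconds: a controller that was powered on
 * automatically is switched back off after this delay if userspace has
 * not taken it over; it is consumed via msecs_to_jiffies() in
 * hci_power_on() below. */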
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command matches
	 * the last init command, and if not just return. */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
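/*
 * Usage sketch (illustrative, not part of the original file): callers
 * wrap one or more hci_send_cmd() calls in a request callback and wait
 * for hci_req_complete() to wake them.  With a hypothetical
 * my_reset_req():
 *
 *	static void my_reset_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	}
 *
 *	err = hci_request(hdev, my_reset_req, 0,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */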
static inline int hci_request(struct hci_dev *hdev,
			      void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
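/*
 * Ordering example (illustrative): the resolve list is kept sorted by
 * ascending |RSSI|, i.e. strongest signal first.  With entries at -40,
 * -60 and -90 dBm, an entry re-inserted at -50 dBm lands between -40
 * and -60, so name resolution always proceeds from the strongest
 * device.
 */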
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space. */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
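/* Note: one inquiry length unit is 1.28 s per the Bluetooth spec, so
 * the ir.length * 2000 ms wait used above leaves a comfortable margin
 * for the controller to finish before the request times out. */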
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			      u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
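/*
 * Worked example (illustrative): an unauthenticated combination key
 * (0x04) from a pairing where either side required dedicated bonding
 * (auth requirement 0x02/0x03) is stored persistently (returns 1);
 * the same key type from a no-bonding pairing (0x00/0x01 on both
 * sides) falls through every check and is dropped once the connection
 * goes away (returns 0).
 */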
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) &&
	    old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
		      struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
	       batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
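/*
 * Usage sketch (illustrative values): an active scan with a 10 ms
 * interval and window (0x0010 in 0.625 ms units) for 10 seconds could
 * be requested with
 *
 *	hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
 *
 * le_scan_work() then runs hci_do_le_scan(), and le_scan_disable_work()
 * turns scanning off once the timeout (in ms) expires.
 */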
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID. */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
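/*
 * Driver-side sketch (illustrative, not part of the original file): a
 * transport driver allocates a device, fills in the mandatory
 * open/close/send callbacks (the my_* names are hypothetical) and
 * registers it:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_UART;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */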
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(__u16, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
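/*
 * Usage sketch (illustrative): a driver whose transport delivers
 * packets piecewise can push each chunk of an event in with
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * hci_reassembly() keeps accumulating until the length announced in
 * the packet header is reached, then hands the complete frame to
 * hci_recv_frame().
 */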
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
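/*
 * Wire-format example (illustrative): hci_send_cmd(hdev, HCI_OP_RESET,
 * 0, NULL) queues a 3 byte frame consisting of the little-endian
 * opcode 0x0c03 followed by a zero parameter length; when plen is
 * non-zero, plen bytes of *param follow the 3 byte command header.
 */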
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
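/*
 * Packing example (illustrative): hci_handle_pack() folds the 12 bit
 * connection handle and the 4 bit packet boundary/broadcast flags into
 * one 16 bit field, so handle 0x002a with ACL_START (0x02) becomes
 * hdr->handle == cpu_to_le16(0x202a).
 */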
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
					    int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
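/*
 * Quota example (illustrative): with hdev->sco_cnt == 8 and two SCO
 * connections holding queued data, the connection with the smallest
 * 'sent' count wins and *quote becomes 8 / 2 == 4 frames for this
 * scheduling round.
 */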
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}

static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
					     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
		return NULL;
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
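/*
 * Example (illustrative): with hdev->block_len == 256, an skb of 1025
 * bytes (4 byte ACL header + 1021 bytes of payload) occupies
 * DIV_ROUND_UP(1021, 256) == 4 controller buffer blocks.
 */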
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer, jiffies +
					  msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}