/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
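/*
 * Usage sketch (illustrative, not part of the original file): the
 * ioctl helpers further down drive the controller through this pair.
 * A caller supplies a request builder plus a timeout, e.g.:
 *
 *	err = hci_request(hdev, hci_scan_req, dr.dev_opt,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The builder queues HCI commands; __hci_request() then sleeps until
 * hci_req_complete() reports a result or the timeout expires.
 */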
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
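/*
 * Usage sketch (illustrative): every successful hci_dev_get() must be
 * balanced with hci_dev_put(), as the ioctl helpers below do:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */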
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
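/*
 * Worked example (illustrative): an unauthenticated combination key
 * (type 0x04) from a pairing where the local side required dedicated
 * bonding (conn->auth_type == 0x02) passes the legacy/debug/changed
 * checks above and is stored persistently by the dedicated-bonding
 * rule.
 */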
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
						bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}
static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
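/*
 * Driver-side sketch (illustrative, hypothetical callbacks): a
 * transport driver allocates a device, fills in the mandatory
 * open/close/destruct hooks checked above plus its send routine, and
 * registers it:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->destruct = my_destruct;
 *	hdev->send     = my_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */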
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
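/*
 * Driver-side sketch (illustrative, not part of the original file):
 * once a transport driver has a complete packet it tags the type and
 * hands the skb up, e.g.:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */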
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
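/*
 * Worked example (illustrative): for an ACL packet the first pass
 * copies HCI_ACL_HDR_SIZE header bytes, then reads h->dlen to learn
 * how much payload to expect; the skb stays parked in
 * hdev->reassembly[index] across calls until scb->expect reaches 0,
 * at which point the completed frame is fed to hci_recv_frame() and
 * the count of unconsumed input bytes is returned.
 */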
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
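/*
 * Usage sketch (illustrative): a transport with partial reads simply
 * feeds whatever it has received; incomplete packets are buffered
 * internally until the rest arrives:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("frame reassembly failed");
 */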
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
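/*
 * Usage sketch (illustrative): an upper protocol claims its slot with
 * a filled-in struct hci_proto, roughly as L2CAP does (callback names
 * here are hypothetical):
 *
 *	static struct hci_proto my_proto = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.recv_acldata	= my_recv_acldata,
 *	};
 *	err = hci_register_proto(&my_proto);
 */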
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
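/*
 * Usage sketch (illustrative): callers pass an opcode plus an optional
 * parameter block, exactly as the init requests above do:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 */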
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
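/*
 * Layout note (illustrative): hci_handle_pack() folds the 12-bit
 * connection handle and the 4 packet-boundary/broadcast flag bits
 * into one 16-bit field, i.e. handle | (flags << 12), which
 * hci_handle() and hci_flags() undo on the receive path below.
 */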
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
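/*
 * Worked example (illustrative): with hdev->acl_cnt == 8 controller
 * buffers free and num == 3 ACL connections ready to send, the least
 * used connection gets a quota of 8 / 3 == 2 frames for this pass;
 * the quota never drops below 1, so a busy link cannot be starved
 * completely.
 */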
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
*hdev
, u8 length
)
2591 /* General inquiry access code (GIAC) */
2592 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2593 struct hci_cp_inquiry cp
;
2595 BT_DBG("%s", hdev
->name
);
2597 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2598 return -EINPROGRESS
;
2600 memset(&cp
, 0, sizeof(cp
));
2601 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2604 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
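/*
 * Usage sketch (illustrative): mgmt-initiated discovery drives this
 * pair; a caller starts with hci_do_inquiry(hdev, length) and can
 * abort with hci_cancel_inquiry(hdev), which fails with -EPERM below
 * unless HCI_INQUIRY is actually set.
 */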
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}