/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
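
/*
 * Illustrative sketch (not part of the original file): another module can
 * watch device state changes through the notifier chain exposed above. The
 * example_* names are hypothetical; only hci_register_notifier(),
 * hci_unregister_notifier() and the HCI_DEV_* events raised elsewhere in
 * this file are taken from the source.
 *
 *	static int example_hci_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			BT_DBG("%s is up", hdev->name);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_hci_event,
 *	};
 *
 *	hci_register_notifier(&example_nb);
 *	...
 *	hci_unregister_notifier(&example_nb);
 */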
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
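
/*
 * Minimal usage sketch (illustrative, not from the original file): the ioctl
 * helpers later in this file drive the controller synchronously by pairing a
 * request callback with hci_request(). A scan-enable change, for example,
 * follows this shape; the timeout mirrors what hci_dev_cmd() uses.
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The callback queues the HCI command, __hci_request() sleeps on
 * hdev->req_wait_q, and hci_req_complete() wakes it when the matching
 * command completes.
 */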
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
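
/*
 * Illustrative sketch (assumption, not from the original file): a transport
 * driver that needs vendor-specific setup can pre-load hdev->driver_init
 * before the device is opened; hci_init_req() above then replays those
 * frames ahead of the mandatory init commands. The 0xfc01 opcode and the
 * payload below are placeholders.
 *
 *	struct sk_buff *skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + 2, GFP_KERNEL);
 *
 *	if (skb) {
 *		struct hci_command_hdr *hdr;
 *
 *		hdr = (void *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
 *		hdr->opcode = cpu_to_le16(0xfc01);	// hypothetical vendor opcode
 *		hdr->plen = 2;
 *		memcpy(skb_put(skb, 2), "\x01\x02", 2);
 *		skb_queue_tail(&hdev->driver_init, skb);
 *	}
 */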
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
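
/*
 * Usage sketch (illustrative): callers that look a device up by index own a
 * reference afterwards and are expected to drop it when done, as the ioctl
 * helpers later in this file do.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */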
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;

	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
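
/*
 * Driver-side sketch (illustrative; the example_* callbacks are hypothetical,
 * the alloc/registration calls are the ones defined in this file): a
 * transport driver typically allocates and fills in an hci_dev before
 * registering it, since hci_register_dev() rejects devices without open,
 * close and destruct callbacks.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = example_open;
 *	hdev->close    = example_close;
 *	hdev->flush    = example_flush;
 *	hdev->send     = example_send;
 *	hdev->destruct = example_destruct;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */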
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
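
/*
 * Tear-down sketch (illustrative, mirroring the registration example near
 * hci_alloc_dev() above): a driver removes the controller and releases its
 * memory in two steps.
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */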
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
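
/*
 * Driver RX sketch (illustrative): a transport driver hands incoming bytes
 * to the core either as complete frames or as fragments. The buffer names
 * below are placeholders; the exported entry points are the ones defined
 * above.
 *
 *	// complete, typed frame already in an skb
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 *	// raw byte stream from a UART-style transport
 *	hci_recv_stream_fragment(hdev, buf, len);
 */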
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
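
/*
 * Usage sketch (illustrative): event handlers pair hci_send_cmd() with
 * hci_sent_cmd_data() to recover the parameters of the command that a
 * Command Complete event refers to, mirroring how this file sends the
 * Write Scan Enable command.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	...
 *	// later, in the command-complete path:
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (sent)
 *		BT_DBG("scan mode was 0x%2.2x", *((__u8 *) sent));
 */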
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
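
/*
 * Worked example (illustrative): with hdev->acl_cnt == 8 free ACL buffers
 * and three eligible channels at the same highest priority, hci_chan_sent()
 * above returns the least-busy channel with a quote of 8 / 3 = 2 frames, so
 * the TX task round-robins between channels instead of letting a single one
 * exhaust the controller buffers.
 */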
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}