/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
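
/*
 * Example (illustrative sketch, not part of the original file): how an
 * interested subsystem could hook the notifier chain exported above.
 * The callback and variable names (example_*) are hypothetical; the
 * notifier_block API and the HCI_DEV_REG/HCI_DEV_UNREG events are the
 * standard ones used by this chain.
 */
static int example_hci_event_notify(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		BT_DBG("controller %s registered", hdev->name);
	else if (event == HCI_DEV_UNREG)
		BT_DBG("controller %s unregistered", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_hci_notifier = {
	.notifier_call = example_hci_event_notify,
};

/* hci_register_notifier(&example_hci_notifier) attaches the callback;
 * hci_unregister_notifier(&example_hci_notifier) detaches it again. */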
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return. */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
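
/*
 * Example (illustrative sketch, not part of the original file): the usage
 * pattern behind hci_request(). A request callback only queues HCI
 * commands; __hci_request()/hci_request() then sleep on req_wait_q until
 * hci_req_complete() wakes the waiter or the timeout expires. The
 * example_* names are hypothetical.
 */
static void example_write_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	/* Just queue the command; completion is signalled asynchronously
	 * through hci_req_complete() from the event handler. */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	/* Blocks up to HCI_INIT_TIMEOUT; returns 0 or a negative errno. */
	return hci_request(hdev, example_write_scan_req, SCAN_PAGE,
				msecs_to_jiffies(HCI_INIT_TIMEOUT));
}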
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
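
/*
 * Example (illustrative sketch, not part of the original file): the
 * hold/put pattern implied by the comment above. hci_dev_get() returns
 * the device with its reference count raised, so every successful lookup
 * must be balanced with hci_dev_put() (assumed here to be the matching
 * helper from hci_core.h). The example_* name is hypothetical.
 */
static int example_query_flags(int index, unsigned long *flags)
{
	struct hci_dev *hdev;

	hdev = hci_dev_get(index);	/* takes a reference */
	if (!hdev)
		return -ENODEV;

	*flags = hdev->flags;

	hci_dev_put(hdev);		/* release the reference */
	return 0;
}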
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;

	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	tasklet_kill(&hdev->cmd_task);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	hci_req_unlock(hdev);

	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
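
/*
 * Example (illustrative sketch, not part of the original file): the minimal
 * sequence a transport driver follows against this interface. All example_*
 * names, the private struct and the trivial callback bodies are hypothetical;
 * only hci_alloc_dev(), hci_register_dev(), hci_unregister_dev() and
 * hci_free_dev() are the entry points defined in this file.
 */
struct example_hci_priv {
	struct hci_dev *hdev;
};

static int example_hci_open(struct hci_dev *hdev)  { return 0; }
static int example_hci_close(struct hci_dev *hdev) { return 0; }
static int example_hci_send(struct sk_buff *skb)   { kfree_skb(skb); return 0; }
static void example_hci_destruct(struct hci_dev *hdev) { }

static int example_hci_probe(struct example_hci_priv *priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->dev_type = HCI_BREDR;
	hdev->driver_data = priv;

	/* Mandatory callbacks checked by hci_register_dev() */
	hdev->open     = example_hci_open;
	hdev->close    = example_hci_close;
	hdev->send     = example_hci_send;
	hdev->destruct = example_hci_destruct;
	hdev->owner    = THIS_MODULE;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	priv->hdev = hdev;
	return 0;
}

/* Tear-down would call hci_unregister_dev(priv->hdev) followed by
 * hci_free_dev(priv->hdev). */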
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	bt_cb(skb)->incoming = 1;

	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
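
/*
 * Example (illustrative sketch, not part of the original file): how a driver
 * that already receives whole packets hands one HCI event to the core. The
 * example_* name and the buffer/length parameters are hypothetical; the skb
 * handling mirrors what hci_recv_frame() expects (pkt_type set, skb->dev
 * pointing at the hci_dev).
 */
static int example_deliver_event(struct hci_dev *hdev,
					const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);	/* queues it on hdev->rx_q */
}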
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0, hlen = 0, remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
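
/*
 * Example (illustrative sketch, not part of the original file): a byte-stream
 * transport (a UART-style driver, say) feeding partial packets through the
 * reassembly helper above. The example_* name and the way the packet type is
 * obtained are hypothetical.
 */
static int example_rx_bytes(struct hci_dev *hdev, __u8 pkt_type,
				void *buf, int len)
{
	int err;

	/* pkt_type is HCI_EVENT_PKT, HCI_ACLDATA_PKT or HCI_SCODATA_PKT;
	 * hci_recv_fragment() buffers the bytes until a full frame is
	 * assembled and then passes it on via hci_recv_frame(). */
	err = hci_recv_fragment(hdev, pkt_type, buf, len);
	if (err < 0)
		hdev->stat.err_rx++;

	return err;
}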
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
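
/*
 * Example (illustrative sketch, not part of the original file): queuing a
 * simple one-parameter command with hci_send_cmd(). The command goes onto
 * cmd_q and hci_cmd_task() pushes it to the driver once the controller has
 * command credit. The example_* wrapper name is hypothetical.
 */
static int example_disable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_DISABLED;

	/* One parameter byte, exactly as hci_scan_req() does above */
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}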
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
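
/*
 * Example (illustrative sketch, not part of the original file): how an upper
 * layer (roughly what the SCO socket code does) would hand one audio frame
 * to hci_send_sco(). The example_* name and the buffer parameters are
 * hypothetical.
 */
static int example_queue_sco(struct hci_conn *conn, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);

	/* hci_send_sco() prepends the SCO header and schedules the TX task */
	hci_send_sco(conn, skb);
	return 0;
}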
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}