2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
53 #define AUTO_OFF_TIMEOUT 2000
55 static void hci_cmd_task(unsigned long arg
);
56 static void hci_rx_task(unsigned long arg
);
57 static void hci_tx_task(unsigned long arg
);
58 static void hci_notify(struct hci_dev
*hdev
, int event
);
60 static DEFINE_RWLOCK(hci_task_lock
);
63 LIST_HEAD(hci_dev_list
);
64 DEFINE_RWLOCK(hci_dev_list_lock
);
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list
);
68 DEFINE_RWLOCK(hci_cb_list_lock
);
71 #define HCI_MAX_PROTO 2
72 struct hci_proto
*hci_proto
[HCI_MAX_PROTO
];
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier
);
77 /* ---- HCI notifications ---- */
79 int hci_register_notifier(struct notifier_block
*nb
)
81 return atomic_notifier_chain_register(&hci_notifier
, nb
);
84 int hci_unregister_notifier(struct notifier_block
*nb
)
86 return atomic_notifier_chain_unregister(&hci_notifier
, nb
);
89 static void hci_notify(struct hci_dev
*hdev
, int event
)
91 atomic_notifier_call_chain(&hci_notifier
, event
, hdev
);
94 /* ---- HCI requests ---- */
96 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev
->name
, cmd
, result
);
100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
103 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
)
106 if (hdev
->req_status
== HCI_REQ_PEND
) {
107 hdev
->req_result
= result
;
108 hdev
->req_status
= HCI_REQ_DONE
;
109 wake_up_interruptible(&hdev
->req_wait_q
);
113 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
115 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
117 if (hdev
->req_status
== HCI_REQ_PEND
) {
118 hdev
->req_result
= err
;
119 hdev
->req_status
= HCI_REQ_CANCELED
;
120 wake_up_interruptible(&hdev
->req_wait_q
);
124 /* Execute request and wait for completion. */
/* Run @req against @hdev and sleep (interruptibly, up to @timeout jiffies)
 * until hci_req_complete()/hci_req_cancel() flips req_status and wakes us.
 * NOTE(review): extraction has dropped several interior lines here (the
 * local err declaration, the req() invocation, the signal-pending early
 * return, remaining switch cases and the final return) — annotate only,
 * do not rewrite. */
125 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
126 unsigned long opt
, __u32 timeout
)
/* Waiter entry for hdev->req_wait_q, bound to the current task. */
128 DECLARE_WAITQUEUE(wait
, current
);
131 BT_DBG("%s start", hdev
->name
);
/* Mark the request pending before issuing commands, so a completion
 * arriving immediately still finds HCI_REQ_PEND set. */
133 hdev
->req_status
= HCI_REQ_PEND
;
/* Queue ourselves before sleeping to avoid a lost wakeup. */
135 add_wait_queue(&hdev
->req_wait_q
, &wait
);
136 set_current_state(TASK_INTERRUPTIBLE
);
/* Sleep until completion, cancellation, signal, or timeout. */
139 schedule_timeout(timeout
);
141 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
/* Interrupted by a signal — presumably returns -EINTR; the return
 * statement is missing from this extraction. TODO confirm. */
143 if (signal_pending(current
))
146 switch (hdev
->req_status
) {
/* HCI_REQ_DONE: map the HCI status code to a negative errno. */
148 err
= -bt_err(hdev
->req_result
);
151 case HCI_REQ_CANCELED
:
/* Cancelled: req_result already holds a positive errno; negate it. */
152 err
= -hdev
->req_result
;
/* Reset request bookkeeping for the next caller. */
160 hdev
->req_status
= hdev
->req_result
= 0;
162 BT_DBG("%s end: err %d", hdev
->name
, err
);
167 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
168 unsigned long opt
, __u32 timeout
)
172 if (!test_bit(HCI_UP
, &hdev
->flags
))
175 /* Serialize all requests */
177 ret
= __hci_request(hdev
, req
, opt
, timeout
);
178 hci_req_unlock(hdev
);
183 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
185 BT_DBG("%s %ld", hdev
->name
, opt
);
188 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
191 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
197 BT_DBG("%s %ld", hdev
->name
, opt
);
199 /* Driver initialization */
201 /* Special commands */
202 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
203 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
204 skb
->dev
= (void *) hdev
;
206 skb_queue_tail(&hdev
->cmd_q
, skb
);
207 tasklet_schedule(&hdev
->cmd_task
);
209 skb_queue_purge(&hdev
->driver_init
);
211 /* Mandatory initialization */
214 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
))
215 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
217 /* Read Local Supported Features */
218 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
220 /* Read Local Version */
221 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
223 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
224 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
227 /* Host buffer size */
229 struct hci_cp_host_buffer_size cp
;
230 cp
.acl_mtu
= cpu_to_le16(HCI_MAX_ACL_SIZE
);
231 cp
.sco_mtu
= HCI_MAX_SCO_SIZE
;
232 cp
.acl_max_pkt
= cpu_to_le16(0xffff);
233 cp
.sco_max_pkt
= cpu_to_le16(0xffff);
234 hci_send_cmd(hdev
, HCI_OP_HOST_BUFFER_SIZE
, sizeof(cp
), &cp
);
238 /* Read BD Address */
239 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
241 /* Read Class of Device */
242 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
244 /* Read Local Name */
245 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
247 /* Read Voice Setting */
248 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
250 /* Optional initialization */
252 /* Clear Event Filters */
253 flt_type
= HCI_FLT_CLEAR_ALL
;
254 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
256 /* Page timeout ~20 secs */
257 param
= cpu_to_le16(0x8000);
258 hci_send_cmd(hdev
, HCI_OP_WRITE_PG_TIMEOUT
, 2, ¶m
);
260 /* Connection accept timeout ~20 secs */
261 param
= cpu_to_le16(0x7d00);
262 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
265 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
269 BT_DBG("%s %x", hdev
->name
, scan
);
271 /* Inquiry and Page scans */
272 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
275 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
279 BT_DBG("%s %x", hdev
->name
, auth
);
282 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
285 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
289 BT_DBG("%s %x", hdev
->name
, encrypt
);
292 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
295 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
297 __le16 policy
= cpu_to_le16(opt
);
299 BT_DBG("%s %x", hdev
->name
, policy
);
301 /* Default link policy */
302 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
305 /* Get HCI device by index.
306 * Device is held on return. */
307 struct hci_dev
*hci_dev_get(int index
)
309 struct hci_dev
*hdev
= NULL
;
317 read_lock(&hci_dev_list_lock
);
318 list_for_each(p
, &hci_dev_list
) {
319 struct hci_dev
*d
= list_entry(p
, struct hci_dev
, list
);
320 if (d
->id
== index
) {
321 hdev
= hci_dev_hold(d
);
325 read_unlock(&hci_dev_list_lock
);
329 /* ---- Inquiry support ---- */
330 static void inquiry_cache_flush(struct hci_dev
*hdev
)
332 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
333 struct inquiry_entry
*next
= cache
->list
, *e
;
335 BT_DBG("cache %p", cache
);
344 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
346 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
347 struct inquiry_entry
*e
;
349 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
351 for (e
= cache
->list
; e
; e
= e
->next
)
352 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
357 void hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
)
359 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
360 struct inquiry_entry
*ie
;
362 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
364 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
366 /* Entry not in the cache. Add new one. */
367 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
371 ie
->next
= cache
->list
;
375 memcpy(&ie
->data
, data
, sizeof(*data
));
376 ie
->timestamp
= jiffies
;
377 cache
->timestamp
= jiffies
;
380 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
382 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
383 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
384 struct inquiry_entry
*e
;
387 for (e
= cache
->list
; e
&& copied
< num
; e
= e
->next
, copied
++) {
388 struct inquiry_data
*data
= &e
->data
;
389 bacpy(&info
->bdaddr
, &data
->bdaddr
);
390 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
391 info
->pscan_period_mode
= data
->pscan_period_mode
;
392 info
->pscan_mode
= data
->pscan_mode
;
393 memcpy(info
->dev_class
, data
->dev_class
, 3);
394 info
->clock_offset
= data
->clock_offset
;
398 BT_DBG("cache %p, copied %d", cache
, copied
);
402 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
404 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
405 struct hci_cp_inquiry cp
;
407 BT_DBG("%s", hdev
->name
);
409 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
413 memcpy(&cp
.lap
, &ir
->lap
, 3);
414 cp
.length
= ir
->length
;
415 cp
.num_rsp
= ir
->num_rsp
;
416 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
419 int hci_inquiry(void __user
*arg
)
421 __u8 __user
*ptr
= arg
;
422 struct hci_inquiry_req ir
;
423 struct hci_dev
*hdev
;
424 int err
= 0, do_inquiry
= 0, max_rsp
;
428 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
431 if (!(hdev
= hci_dev_get(ir
.dev_id
)))
434 hci_dev_lock_bh(hdev
);
435 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
436 inquiry_cache_empty(hdev
) ||
437 ir
.flags
& IREQ_CACHE_FLUSH
) {
438 inquiry_cache_flush(hdev
);
441 hci_dev_unlock_bh(hdev
);
443 timeo
= ir
.length
* msecs_to_jiffies(2000);
446 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
451 /* for unlimited number of responses we will use buffer with 255 entries */
452 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
454 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
455 * copy it to the user space.
457 buf
= kmalloc(sizeof(struct inquiry_info
) *max_rsp
, GFP_KERNEL
);
463 hci_dev_lock_bh(hdev
);
464 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
465 hci_dev_unlock_bh(hdev
);
467 BT_DBG("num_rsp %d", ir
.num_rsp
);
469 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
471 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
484 /* ---- HCI ioctl helpers ---- */
486 int hci_dev_open(__u16 dev
)
488 struct hci_dev
*hdev
;
491 if (!(hdev
= hci_dev_get(dev
)))
494 BT_DBG("%s %p", hdev
->name
, hdev
);
498 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
503 if (test_bit(HCI_UP
, &hdev
->flags
)) {
508 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
509 set_bit(HCI_RAW
, &hdev
->flags
);
511 /* Treat all non BR/EDR controllers as raw devices for now */
512 if (hdev
->dev_type
!= HCI_BREDR
)
513 set_bit(HCI_RAW
, &hdev
->flags
);
515 if (hdev
->open(hdev
)) {
520 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
521 atomic_set(&hdev
->cmd_cnt
, 1);
522 set_bit(HCI_INIT
, &hdev
->flags
);
523 hdev
->init_last_cmd
= 0;
525 //__hci_request(hdev, hci_reset_req, 0, HZ);
526 ret
= __hci_request(hdev
, hci_init_req
, 0,
527 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
529 clear_bit(HCI_INIT
, &hdev
->flags
);
534 set_bit(HCI_UP
, &hdev
->flags
);
535 hci_notify(hdev
, HCI_DEV_UP
);
536 if (!test_bit(HCI_SETUP
, &hdev
->flags
))
537 mgmt_powered(hdev
->id
, 1);
539 /* Init failed, cleanup */
540 tasklet_kill(&hdev
->rx_task
);
541 tasklet_kill(&hdev
->tx_task
);
542 tasklet_kill(&hdev
->cmd_task
);
544 skb_queue_purge(&hdev
->cmd_q
);
545 skb_queue_purge(&hdev
->rx_q
);
550 if (hdev
->sent_cmd
) {
551 kfree_skb(hdev
->sent_cmd
);
552 hdev
->sent_cmd
= NULL
;
560 hci_req_unlock(hdev
);
565 static int hci_dev_do_close(struct hci_dev
*hdev
)
567 BT_DBG("%s %p", hdev
->name
, hdev
);
569 hci_req_cancel(hdev
, ENODEV
);
572 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
573 hci_req_unlock(hdev
);
577 /* Kill RX and TX tasks */
578 tasklet_kill(&hdev
->rx_task
);
579 tasklet_kill(&hdev
->tx_task
);
581 hci_dev_lock_bh(hdev
);
582 inquiry_cache_flush(hdev
);
583 hci_conn_hash_flush(hdev
);
584 hci_dev_unlock_bh(hdev
);
586 hci_notify(hdev
, HCI_DEV_DOWN
);
592 skb_queue_purge(&hdev
->cmd_q
);
593 atomic_set(&hdev
->cmd_cnt
, 1);
594 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
595 set_bit(HCI_INIT
, &hdev
->flags
);
596 __hci_request(hdev
, hci_reset_req
, 0,
597 msecs_to_jiffies(250));
598 clear_bit(HCI_INIT
, &hdev
->flags
);
602 tasklet_kill(&hdev
->cmd_task
);
605 skb_queue_purge(&hdev
->rx_q
);
606 skb_queue_purge(&hdev
->cmd_q
);
607 skb_queue_purge(&hdev
->raw_q
);
609 /* Drop last sent command */
610 if (hdev
->sent_cmd
) {
611 kfree_skb(hdev
->sent_cmd
);
612 hdev
->sent_cmd
= NULL
;
615 /* After this point our queues are empty
616 * and no tasks are scheduled. */
619 mgmt_powered(hdev
->id
, 0);
624 hci_req_unlock(hdev
);
630 int hci_dev_close(__u16 dev
)
632 struct hci_dev
*hdev
;
635 hdev
= hci_dev_get(dev
);
638 err
= hci_dev_do_close(hdev
);
643 int hci_dev_reset(__u16 dev
)
645 struct hci_dev
*hdev
;
648 hdev
= hci_dev_get(dev
);
653 tasklet_disable(&hdev
->tx_task
);
655 if (!test_bit(HCI_UP
, &hdev
->flags
))
659 skb_queue_purge(&hdev
->rx_q
);
660 skb_queue_purge(&hdev
->cmd_q
);
662 hci_dev_lock_bh(hdev
);
663 inquiry_cache_flush(hdev
);
664 hci_conn_hash_flush(hdev
);
665 hci_dev_unlock_bh(hdev
);
670 atomic_set(&hdev
->cmd_cnt
, 1);
671 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0;
673 if (!test_bit(HCI_RAW
, &hdev
->flags
))
674 ret
= __hci_request(hdev
, hci_reset_req
, 0,
675 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
678 tasklet_enable(&hdev
->tx_task
);
679 hci_req_unlock(hdev
);
684 int hci_dev_reset_stat(__u16 dev
)
686 struct hci_dev
*hdev
;
689 hdev
= hci_dev_get(dev
);
693 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
700 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
702 struct hci_dev
*hdev
;
703 struct hci_dev_req dr
;
706 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
709 hdev
= hci_dev_get(dr
.dev_id
);
715 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
716 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
720 if (!lmp_encrypt_capable(hdev
)) {
725 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
726 /* Auth must be enabled first */
727 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
728 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
733 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
734 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
738 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
739 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
743 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
744 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
748 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
749 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
753 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
757 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
758 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
762 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
763 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
775 int hci_get_dev_list(void __user
*arg
)
777 struct hci_dev_list_req
*dl
;
778 struct hci_dev_req
*dr
;
780 int n
= 0, size
, err
;
783 if (get_user(dev_num
, (__u16 __user
*) arg
))
786 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
789 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
791 dl
= kzalloc(size
, GFP_KERNEL
);
797 read_lock_bh(&hci_dev_list_lock
);
798 list_for_each(p
, &hci_dev_list
) {
799 struct hci_dev
*hdev
;
801 hdev
= list_entry(p
, struct hci_dev
, list
);
803 hci_del_off_timer(hdev
);
805 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
806 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
808 (dr
+ n
)->dev_id
= hdev
->id
;
809 (dr
+ n
)->dev_opt
= hdev
->flags
;
814 read_unlock_bh(&hci_dev_list_lock
);
817 size
= sizeof(*dl
) + n
* sizeof(*dr
);
819 err
= copy_to_user(arg
, dl
, size
);
822 return err
? -EFAULT
: 0;
825 int hci_get_dev_info(void __user
*arg
)
827 struct hci_dev
*hdev
;
828 struct hci_dev_info di
;
831 if (copy_from_user(&di
, arg
, sizeof(di
)))
834 hdev
= hci_dev_get(di
.dev_id
);
838 hci_del_off_timer(hdev
);
840 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
841 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
843 strcpy(di
.name
, hdev
->name
);
844 di
.bdaddr
= hdev
->bdaddr
;
845 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
846 di
.flags
= hdev
->flags
;
847 di
.pkt_type
= hdev
->pkt_type
;
848 di
.acl_mtu
= hdev
->acl_mtu
;
849 di
.acl_pkts
= hdev
->acl_pkts
;
850 di
.sco_mtu
= hdev
->sco_mtu
;
851 di
.sco_pkts
= hdev
->sco_pkts
;
852 di
.link_policy
= hdev
->link_policy
;
853 di
.link_mode
= hdev
->link_mode
;
855 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
856 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
858 if (copy_to_user(arg
, &di
, sizeof(di
)))
866 /* ---- Interface to HCI drivers ---- */
868 static int hci_rfkill_set_block(void *data
, bool blocked
)
870 struct hci_dev
*hdev
= data
;
872 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
877 hci_dev_do_close(hdev
);
882 static const struct rfkill_ops hci_rfkill_ops
= {
883 .set_block
= hci_rfkill_set_block
,
886 /* Alloc HCI device */
887 struct hci_dev
*hci_alloc_dev(void)
889 struct hci_dev
*hdev
;
891 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
895 skb_queue_head_init(&hdev
->driver_init
);
899 EXPORT_SYMBOL(hci_alloc_dev
);
901 /* Free HCI device */
902 void hci_free_dev(struct hci_dev
*hdev
)
904 skb_queue_purge(&hdev
->driver_init
);
906 /* will free via device release */
907 put_device(&hdev
->dev
);
909 EXPORT_SYMBOL(hci_free_dev
);
911 static void hci_power_on(struct work_struct
*work
)
913 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
915 BT_DBG("%s", hdev
->name
);
917 if (hci_dev_open(hdev
->id
) < 0)
920 if (test_bit(HCI_AUTO_OFF
, &hdev
->flags
))
921 mod_timer(&hdev
->off_timer
,
922 jiffies
+ msecs_to_jiffies(AUTO_OFF_TIMEOUT
));
924 if (test_and_clear_bit(HCI_SETUP
, &hdev
->flags
))
925 mgmt_index_added(hdev
->id
);
928 static void hci_power_off(struct work_struct
*work
)
930 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_off
);
932 BT_DBG("%s", hdev
->name
);
934 hci_dev_close(hdev
->id
);
937 static void hci_auto_off(unsigned long data
)
939 struct hci_dev
*hdev
= (struct hci_dev
*) data
;
941 BT_DBG("%s", hdev
->name
);
943 clear_bit(HCI_AUTO_OFF
, &hdev
->flags
);
945 queue_work(hdev
->workqueue
, &hdev
->power_off
);
948 void hci_del_off_timer(struct hci_dev
*hdev
)
950 BT_DBG("%s", hdev
->name
);
952 clear_bit(HCI_AUTO_OFF
, &hdev
->flags
);
953 del_timer(&hdev
->off_timer
);
956 int hci_uuids_clear(struct hci_dev
*hdev
)
958 struct list_head
*p
, *n
;
960 list_for_each_safe(p
, n
, &hdev
->uuids
) {
961 struct bt_uuid
*uuid
;
963 uuid
= list_entry(p
, struct bt_uuid
, list
);
972 /* Register HCI device */
973 int hci_register_dev(struct hci_dev
*hdev
)
975 struct list_head
*head
= &hci_dev_list
, *p
;
978 BT_DBG("%p name %s bus %d owner %p", hdev
, hdev
->name
,
979 hdev
->bus
, hdev
->owner
);
981 if (!hdev
->open
|| !hdev
->close
|| !hdev
->destruct
)
984 write_lock_bh(&hci_dev_list_lock
);
986 /* Find first available device id */
987 list_for_each(p
, &hci_dev_list
) {
988 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
993 sprintf(hdev
->name
, "hci%d", id
);
995 list_add(&hdev
->list
, head
);
997 atomic_set(&hdev
->refcnt
, 1);
998 spin_lock_init(&hdev
->lock
);
1001 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1002 hdev
->esco_type
= (ESCO_HV1
);
1003 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1005 hdev
->idle_timeout
= 0;
1006 hdev
->sniff_max_interval
= 800;
1007 hdev
->sniff_min_interval
= 80;
1009 tasklet_init(&hdev
->cmd_task
, hci_cmd_task
, (unsigned long) hdev
);
1010 tasklet_init(&hdev
->rx_task
, hci_rx_task
, (unsigned long) hdev
);
1011 tasklet_init(&hdev
->tx_task
, hci_tx_task
, (unsigned long) hdev
);
1013 skb_queue_head_init(&hdev
->rx_q
);
1014 skb_queue_head_init(&hdev
->cmd_q
);
1015 skb_queue_head_init(&hdev
->raw_q
);
1017 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1018 hdev
->reassembly
[i
] = NULL
;
1020 init_waitqueue_head(&hdev
->req_wait_q
);
1021 mutex_init(&hdev
->req_lock
);
1023 inquiry_cache_init(hdev
);
1025 hci_conn_hash_init(hdev
);
1027 INIT_LIST_HEAD(&hdev
->blacklist
);
1029 INIT_LIST_HEAD(&hdev
->uuids
);
1031 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1032 INIT_WORK(&hdev
->power_off
, hci_power_off
);
1033 setup_timer(&hdev
->off_timer
, hci_auto_off
, (unsigned long) hdev
);
1035 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1037 atomic_set(&hdev
->promisc
, 0);
1039 write_unlock_bh(&hci_dev_list_lock
);
1041 hdev
->workqueue
= create_singlethread_workqueue(hdev
->name
);
1042 if (!hdev
->workqueue
)
1045 hci_register_sysfs(hdev
);
1047 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1048 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
1050 if (rfkill_register(hdev
->rfkill
) < 0) {
1051 rfkill_destroy(hdev
->rfkill
);
1052 hdev
->rfkill
= NULL
;
1056 set_bit(HCI_AUTO_OFF
, &hdev
->flags
);
1057 set_bit(HCI_SETUP
, &hdev
->flags
);
1058 queue_work(hdev
->workqueue
, &hdev
->power_on
);
1060 hci_notify(hdev
, HCI_DEV_REG
);
1065 write_lock_bh(&hci_dev_list_lock
);
1066 list_del(&hdev
->list
);
1067 write_unlock_bh(&hci_dev_list_lock
);
1071 EXPORT_SYMBOL(hci_register_dev
);
1073 /* Unregister HCI device */
1074 int hci_unregister_dev(struct hci_dev
*hdev
)
1078 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1080 write_lock_bh(&hci_dev_list_lock
);
1081 list_del(&hdev
->list
);
1082 write_unlock_bh(&hci_dev_list_lock
);
1084 hci_dev_do_close(hdev
);
1086 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1087 kfree_skb(hdev
->reassembly
[i
]);
1089 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1090 !test_bit(HCI_SETUP
, &hdev
->flags
))
1091 mgmt_index_removed(hdev
->id
);
1093 hci_notify(hdev
, HCI_DEV_UNREG
);
1096 rfkill_unregister(hdev
->rfkill
);
1097 rfkill_destroy(hdev
->rfkill
);
1100 hci_unregister_sysfs(hdev
);
1102 destroy_workqueue(hdev
->workqueue
);
1104 hci_dev_lock_bh(hdev
);
1105 hci_blacklist_clear(hdev
);
1106 hci_uuids_clear(hdev
);
1107 hci_dev_unlock_bh(hdev
);
1109 __hci_dev_put(hdev
);
1113 EXPORT_SYMBOL(hci_unregister_dev
);
1115 /* Suspend HCI device */
1116 int hci_suspend_dev(struct hci_dev
*hdev
)
1118 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1121 EXPORT_SYMBOL(hci_suspend_dev
);
1123 /* Resume HCI device */
1124 int hci_resume_dev(struct hci_dev
*hdev
)
1126 hci_notify(hdev
, HCI_DEV_RESUME
);
1129 EXPORT_SYMBOL(hci_resume_dev
);
1131 /* Receive frame from HCI drivers */
1132 int hci_recv_frame(struct sk_buff
*skb
)
1134 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1135 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1136 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1142 bt_cb(skb
)->incoming
= 1;
1145 __net_timestamp(skb
);
1147 /* Queue frame for rx task */
1148 skb_queue_tail(&hdev
->rx_q
, skb
);
1149 tasklet_schedule(&hdev
->rx_task
);
1153 EXPORT_SYMBOL(hci_recv_frame
);
1155 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1156 int count
, __u8 index
, gfp_t gfp_mask
)
1161 struct sk_buff
*skb
;
1162 struct bt_skb_cb
*scb
;
1164 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1165 index
>= NUM_REASSEMBLY
)
1168 skb
= hdev
->reassembly
[index
];
1172 case HCI_ACLDATA_PKT
:
1173 len
= HCI_MAX_FRAME_SIZE
;
1174 hlen
= HCI_ACL_HDR_SIZE
;
1177 len
= HCI_MAX_EVENT_SIZE
;
1178 hlen
= HCI_EVENT_HDR_SIZE
;
1180 case HCI_SCODATA_PKT
:
1181 len
= HCI_MAX_SCO_SIZE
;
1182 hlen
= HCI_SCO_HDR_SIZE
;
1186 skb
= bt_skb_alloc(len
, gfp_mask
);
1190 scb
= (void *) skb
->cb
;
1192 scb
->pkt_type
= type
;
1194 skb
->dev
= (void *) hdev
;
1195 hdev
->reassembly
[index
] = skb
;
1199 scb
= (void *) skb
->cb
;
1200 len
= min(scb
->expect
, (__u16
)count
);
1202 memcpy(skb_put(skb
, len
), data
, len
);
1211 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1212 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1213 scb
->expect
= h
->plen
;
1215 if (skb_tailroom(skb
) < scb
->expect
) {
1217 hdev
->reassembly
[index
] = NULL
;
1223 case HCI_ACLDATA_PKT
:
1224 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1225 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1226 scb
->expect
= __le16_to_cpu(h
->dlen
);
1228 if (skb_tailroom(skb
) < scb
->expect
) {
1230 hdev
->reassembly
[index
] = NULL
;
1236 case HCI_SCODATA_PKT
:
1237 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1238 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1239 scb
->expect
= h
->dlen
;
1241 if (skb_tailroom(skb
) < scb
->expect
) {
1243 hdev
->reassembly
[index
] = NULL
;
1250 if (scb
->expect
== 0) {
1251 /* Complete frame */
1253 bt_cb(skb
)->pkt_type
= type
;
1254 hci_recv_frame(skb
);
1256 hdev
->reassembly
[index
] = NULL
;
1264 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1268 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1272 rem
= hci_reassembly(hdev
, type
, data
, count
,
1273 type
- 1, GFP_ATOMIC
);
1277 data
+= (count
- rem
);
1283 EXPORT_SYMBOL(hci_recv_fragment
);
1285 #define STREAM_REASSEMBLY 0
1287 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1293 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1296 struct { char type
; } *pkt
;
1298 /* Start of the frame */
1305 type
= bt_cb(skb
)->pkt_type
;
1307 rem
= hci_reassembly(hdev
, type
, data
,
1308 count
, STREAM_REASSEMBLY
, GFP_ATOMIC
);
1312 data
+= (count
- rem
);
1318 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1320 /* ---- Interface to upper protocols ---- */
1322 /* Register/Unregister protocols.
1323 * hci_task_lock is used to ensure that no tasks are running. */
1324 int hci_register_proto(struct hci_proto
*hp
)
1328 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1330 if (hp
->id
>= HCI_MAX_PROTO
)
1333 write_lock_bh(&hci_task_lock
);
1335 if (!hci_proto
[hp
->id
])
1336 hci_proto
[hp
->id
] = hp
;
1340 write_unlock_bh(&hci_task_lock
);
1344 EXPORT_SYMBOL(hci_register_proto
);
1346 int hci_unregister_proto(struct hci_proto
*hp
)
1350 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1352 if (hp
->id
>= HCI_MAX_PROTO
)
1355 write_lock_bh(&hci_task_lock
);
1357 if (hci_proto
[hp
->id
])
1358 hci_proto
[hp
->id
] = NULL
;
1362 write_unlock_bh(&hci_task_lock
);
1366 EXPORT_SYMBOL(hci_unregister_proto
);
1368 int hci_register_cb(struct hci_cb
*cb
)
1370 BT_DBG("%p name %s", cb
, cb
->name
);
1372 write_lock_bh(&hci_cb_list_lock
);
1373 list_add(&cb
->list
, &hci_cb_list
);
1374 write_unlock_bh(&hci_cb_list_lock
);
1378 EXPORT_SYMBOL(hci_register_cb
);
1380 int hci_unregister_cb(struct hci_cb
*cb
)
1382 BT_DBG("%p name %s", cb
, cb
->name
);
1384 write_lock_bh(&hci_cb_list_lock
);
1385 list_del(&cb
->list
);
1386 write_unlock_bh(&hci_cb_list_lock
);
1390 EXPORT_SYMBOL(hci_unregister_cb
);
1392 static int hci_send_frame(struct sk_buff
*skb
)
1394 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1401 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
1403 if (atomic_read(&hdev
->promisc
)) {
1405 __net_timestamp(skb
);
1407 hci_send_to_sock(hdev
, skb
, NULL
);
1410 /* Get rid of skb owner, prior to sending to the driver. */
1413 return hdev
->send(skb
);
1416 /* Send HCI command */
1417 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
1419 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
1420 struct hci_command_hdr
*hdr
;
1421 struct sk_buff
*skb
;
1423 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
1425 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1427 BT_ERR("%s no memory for command", hdev
->name
);
1431 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
1432 hdr
->opcode
= cpu_to_le16(opcode
);
1436 memcpy(skb_put(skb
, plen
), param
, plen
);
1438 BT_DBG("skb len %d", skb
->len
);
1440 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
1441 skb
->dev
= (void *) hdev
;
1443 if (test_bit(HCI_INIT
, &hdev
->flags
))
1444 hdev
->init_last_cmd
= opcode
;
1446 skb_queue_tail(&hdev
->cmd_q
, skb
);
1447 tasklet_schedule(&hdev
->cmd_task
);
1452 /* Get data from the previously sent command */
1453 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
1455 struct hci_command_hdr
*hdr
;
1457 if (!hdev
->sent_cmd
)
1460 hdr
= (void *) hdev
->sent_cmd
->data
;
1462 if (hdr
->opcode
!= cpu_to_le16(opcode
))
1465 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
1467 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
1471 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
1473 struct hci_acl_hdr
*hdr
;
1476 skb_push(skb
, HCI_ACL_HDR_SIZE
);
1477 skb_reset_transport_header(skb
);
1478 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
1479 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
1480 hdr
->dlen
= cpu_to_le16(len
);
1483 void hci_send_acl(struct hci_conn
*conn
, struct sk_buff
*skb
, __u16 flags
)
1485 struct hci_dev
*hdev
= conn
->hdev
;
1486 struct sk_buff
*list
;
1488 BT_DBG("%s conn %p flags 0x%x", hdev
->name
, conn
, flags
);
1490 skb
->dev
= (void *) hdev
;
1491 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1492 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
1494 list
= skb_shinfo(skb
)->frag_list
;
1496 /* Non fragmented */
1497 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
1499 skb_queue_tail(&conn
->data_q
, skb
);
1502 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1504 skb_shinfo(skb
)->frag_list
= NULL
;
1506 /* Queue all fragments atomically */
1507 spin_lock_bh(&conn
->data_q
.lock
);
1509 __skb_queue_tail(&conn
->data_q
, skb
);
1511 flags
&= ~ACL_START
;
1514 skb
= list
; list
= list
->next
;
1516 skb
->dev
= (void *) hdev
;
1517 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1518 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
1520 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1522 __skb_queue_tail(&conn
->data_q
, skb
);
1525 spin_unlock_bh(&conn
->data_q
.lock
);
1528 tasklet_schedule(&hdev
->tx_task
);
1530 EXPORT_SYMBOL(hci_send_acl
);
1533 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
1535 struct hci_dev
*hdev
= conn
->hdev
;
1536 struct hci_sco_hdr hdr
;
1538 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
1540 hdr
.handle
= cpu_to_le16(conn
->handle
);
1541 hdr
.dlen
= skb
->len
;
1543 skb_push(skb
, HCI_SCO_HDR_SIZE
);
1544 skb_reset_transport_header(skb
);
1545 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
1547 skb
->dev
= (void *) hdev
;
1548 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
1550 skb_queue_tail(&conn
->data_q
, skb
);
1551 tasklet_schedule(&hdev
->tx_task
);
1553 EXPORT_SYMBOL(hci_send_sco
);
1555 /* ---- HCI TX task (outgoing data) ---- */
1557 /* HCI Connection scheduler */
1558 static inline struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
, int *quote
)
1560 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
1561 struct hci_conn
*conn
= NULL
;
1562 int num
= 0, min
= ~0;
1563 struct list_head
*p
;
1565 /* We don't have to lock device here. Connections are always
1566 * added and removed with TX task disabled. */
1567 list_for_each(p
, &h
->list
) {
1569 c
= list_entry(p
, struct hci_conn
, list
);
1571 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
1574 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
1579 if (c
->sent
< min
) {
1586 int cnt
= (type
== ACL_LINK
? hdev
->acl_cnt
: hdev
->sco_cnt
);
1592 BT_DBG("conn %p quote %d", conn
, *quote
);
1596 static inline void hci_acl_tx_to(struct hci_dev
*hdev
)
1598 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
1599 struct list_head
*p
;
1602 BT_ERR("%s ACL tx timeout", hdev
->name
);
1604 /* Kill stalled connections */
1605 list_for_each(p
, &h
->list
) {
1606 c
= list_entry(p
, struct hci_conn
, list
);
1607 if (c
->type
== ACL_LINK
&& c
->sent
) {
1608 BT_ERR("%s killing stalled ACL connection %s",
1609 hdev
->name
, batostr(&c
->dst
));
1610 hci_acl_disconn(c
, 0x13);
1615 static inline void hci_sched_acl(struct hci_dev
*hdev
)
1617 struct hci_conn
*conn
;
1618 struct sk_buff
*skb
;
1621 BT_DBG("%s", hdev
->name
);
1623 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
1624 /* ACL tx timeout must be longer than maximum
1625 * link supervision timeout (40.9 seconds) */
1626 if (!hdev
->acl_cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+ HZ
* 45))
1627 hci_acl_tx_to(hdev
);
1630 while (hdev
->acl_cnt
&& (conn
= hci_low_sent(hdev
, ACL_LINK
, "e
))) {
1631 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1632 BT_DBG("skb %p len %d", skb
, skb
->len
);
1634 hci_conn_enter_active_mode(conn
);
1636 hci_send_frame(skb
);
1637 hdev
->acl_last_tx
= jiffies
;
1646 static inline void hci_sched_sco(struct hci_dev
*hdev
)
1648 struct hci_conn
*conn
;
1649 struct sk_buff
*skb
;
1652 BT_DBG("%s", hdev
->name
);
1654 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
1655 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1656 BT_DBG("skb %p len %d", skb
, skb
->len
);
1657 hci_send_frame(skb
);
1660 if (conn
->sent
== ~0)
1666 static inline void hci_sched_esco(struct hci_dev
*hdev
)
1668 struct hci_conn
*conn
;
1669 struct sk_buff
*skb
;
1672 BT_DBG("%s", hdev
->name
);
1674 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
, "e
))) {
1675 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1676 BT_DBG("skb %p len %d", skb
, skb
->len
);
1677 hci_send_frame(skb
);
1680 if (conn
->sent
== ~0)
1686 static void hci_tx_task(unsigned long arg
)
1688 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1689 struct sk_buff
*skb
;
1691 read_lock(&hci_task_lock
);
1693 BT_DBG("%s acl %d sco %d", hdev
->name
, hdev
->acl_cnt
, hdev
->sco_cnt
);
1695 /* Schedule queues and send stuff to HCI driver */
1697 hci_sched_acl(hdev
);
1699 hci_sched_sco(hdev
);
1701 hci_sched_esco(hdev
);
1703 /* Send next queued raw (unknown type) packet */
1704 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
1705 hci_send_frame(skb
);
1707 read_unlock(&hci_task_lock
);
1710 /* ----- HCI RX task (incoming data proccessing) ----- */
1712 /* ACL data packet */
1713 static inline void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
1715 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
1716 struct hci_conn
*conn
;
1717 __u16 handle
, flags
;
1719 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
1721 handle
= __le16_to_cpu(hdr
->handle
);
1722 flags
= hci_flags(handle
);
1723 handle
= hci_handle(handle
);
1725 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev
->name
, skb
->len
, handle
, flags
);
1727 hdev
->stat
.acl_rx
++;
1730 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
1731 hci_dev_unlock(hdev
);
1734 register struct hci_proto
*hp
;
1736 hci_conn_enter_active_mode(conn
);
1738 /* Send to upper protocol */
1739 hp
= hci_proto
[HCI_PROTO_L2CAP
];
1740 if (hp
&& hp
->recv_acldata
) {
1741 hp
->recv_acldata(conn
, skb
, flags
);
1745 BT_ERR("%s ACL packet for unknown connection handle %d",
1746 hdev
->name
, handle
);
1752 /* SCO data packet */
1753 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
1755 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
1756 struct hci_conn
*conn
;
1759 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
1761 handle
= __le16_to_cpu(hdr
->handle
);
1763 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
1765 hdev
->stat
.sco_rx
++;
1768 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
1769 hci_dev_unlock(hdev
);
1772 register struct hci_proto
*hp
;
1774 /* Send to upper protocol */
1775 hp
= hci_proto
[HCI_PROTO_SCO
];
1776 if (hp
&& hp
->recv_scodata
) {
1777 hp
->recv_scodata(conn
, skb
);
1781 BT_ERR("%s SCO packet for unknown connection handle %d",
1782 hdev
->name
, handle
);
1788 static void hci_rx_task(unsigned long arg
)
1790 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1791 struct sk_buff
*skb
;
1793 BT_DBG("%s", hdev
->name
);
1795 read_lock(&hci_task_lock
);
1797 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
1798 if (atomic_read(&hdev
->promisc
)) {
1799 /* Send copy to the sockets */
1800 hci_send_to_sock(hdev
, skb
, NULL
);
1803 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
1808 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
1809 /* Don't process data packets in this states. */
1810 switch (bt_cb(skb
)->pkt_type
) {
1811 case HCI_ACLDATA_PKT
:
1812 case HCI_SCODATA_PKT
:
1819 switch (bt_cb(skb
)->pkt_type
) {
1821 hci_event_packet(hdev
, skb
);
1824 case HCI_ACLDATA_PKT
:
1825 BT_DBG("%s ACL data packet", hdev
->name
);
1826 hci_acldata_packet(hdev
, skb
);
1829 case HCI_SCODATA_PKT
:
1830 BT_DBG("%s SCO data packet", hdev
->name
);
1831 hci_scodata_packet(hdev
, skb
);
1840 read_unlock(&hci_task_lock
);
1843 static void hci_cmd_task(unsigned long arg
)
1845 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1846 struct sk_buff
*skb
;
1848 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
1850 if (!atomic_read(&hdev
->cmd_cnt
) && time_after(jiffies
, hdev
->cmd_last_tx
+ HZ
)) {
1851 BT_ERR("%s command tx timeout", hdev
->name
);
1852 atomic_set(&hdev
->cmd_cnt
, 1);
1855 /* Send queued commands */
1856 if (atomic_read(&hdev
->cmd_cnt
) && (skb
= skb_dequeue(&hdev
->cmd_q
))) {
1857 kfree_skb(hdev
->sent_cmd
);
1859 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
1860 if (hdev
->sent_cmd
) {
1861 atomic_dec(&hdev
->cmd_cnt
);
1862 hci_send_frame(skb
);
1863 hdev
->cmd_last_tx
= jiffies
;
1865 skb_queue_head(&hdev
->cmd_q
, skb
);
1866 tasklet_schedule(&hdev
->cmd_task
);