/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
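/* A brief note on the request machinery above (added commentary, not part of
 * the original flow): __hci_request() runs the req() callback to queue HCI
 * commands, then sleeps on req_wait_q with req_status set to HCI_REQ_PEND.
 * The event path calls hci_req_complete() (or hci_req_cancel()), which stores
 * the result, flips req_status to HCI_REQ_DONE/HCI_REQ_CANCELED and wakes the
 * waiter; the switch in __hci_request() then maps that status to an errno.
 */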
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
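/* Added note: entries move between the discovery lists based on name_state.
 * NAME_NOT_KNOWN entries sit on cache->unknown (candidates for remote name
 * requests), entries awaiting resolution are kept on cache->resolve ordered
 * by RSSI via hci_inquiry_cache_update_resolve(), and NAME_KNOWN entries
 * live only on cache->all. The bool returned above is false while the
 * remote name is still unknown.
 */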
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir,
				  timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
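/* Added note: hci_dev_open() is also what the power_on work below relies on;
 * for a non-HCI_RAW controller it sends the init sequence (hci_init_req, and
 * hci_le_init_req for LE capable controllers) under HCI_INIT and only then
 * marks the device HCI_UP and reports it powered to the management interface.
 */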
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
*hdev
, bdaddr_t
*bdaddr
)
1346 struct smp_ltk
*k
, *tmp
;
1348 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1349 if (bacmp(bdaddr
, &k
->bdaddr
))
1352 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
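/* Added note: an LE scan is started as two blocking requests - first
 * LE Set Scan Parameters, then LE Set Scan Enable - and is stopped again by
 * the le_scan_disable delayed work scheduled above once 'timeout' expires.
 */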
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
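/* Added usage sketch (illustrative only, not part of this file): a transport
 * driver allocates and fills in a struct hci_dev before registering it,
 * roughly along these lines (my_open/my_close/my_send_frame are hypothetical
 * driver callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus   = HCI_USB;		// or HCI_UART, HCI_SDIO, ...
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send_frame;
 *	err = hci_register_dev(hdev);	// schedules power_on, adds sysfs entry
 *
 * and undoes it with hci_unregister_dev() + hci_free_dev() on removal.
 */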
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
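/* Added note: hci_reassembly() keeps one partially assembled skb per slot in
 * hdev->reassembly[]; scb->expect counts the bytes still missing. It first
 * collects the packet header, then grows expect to the payload length taken
 * from that header, and hands the skb to hci_recv_frame() once expect drops
 * to zero, returning how many input bytes were left unconsumed.
 */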
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
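/* Added note: the scheduler above is priority driven - only channels whose
 * head skb carries the highest pending priority compete, and among those the
 * connection with the fewest outstanding packets wins. The returned *quote is
 * the controller's free buffer count divided by the number of competitors,
 * so every busy channel gets a share of each scheduling round.
 */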
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
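/* Added note: hdev->cmd_cnt implements the one-outstanding-command rule -
 * it is decremented here when a command is handed to the driver and set back
 * to 1 from the Command Complete/Status event path (or by the cmd_timer
 * timeout handler above), which then re-queues cmd_work so the next queued
 * command can be sent.
 */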
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}