/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Worker prototypes; bodies are defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
)
62 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
64 if (hdev
->req_status
== HCI_REQ_PEND
) {
65 hdev
->req_result
= result
;
66 hdev
->req_status
= HCI_REQ_DONE
;
67 wake_up_interruptible(&hdev
->req_wait_q
);
71 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
73 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
75 if (hdev
->req_status
== HCI_REQ_PEND
) {
76 hdev
->req_result
= err
;
77 hdev
->req_status
= HCI_REQ_CANCELED
;
78 wake_up_interruptible(&hdev
->req_wait_q
);
82 /* Execute request and wait for completion. */
83 static int __hci_req_sync(struct hci_dev
*hdev
,
84 void (*func
)(struct hci_request
*req
,
86 unsigned long opt
, __u32 timeout
)
88 struct hci_request req
;
89 DECLARE_WAITQUEUE(wait
, current
);
92 BT_DBG("%s start", hdev
->name
);
94 hci_req_init(&req
, hdev
);
96 hdev
->req_status
= HCI_REQ_PEND
;
100 err
= hci_req_run(&req
, hci_req_sync_complete
);
102 hdev
->req_status
= 0;
103 /* req_run will fail if the request did not add any
104 * commands to the queue, something that can happen when
105 * a request with conditionals doesn't trigger any
106 * commands to be sent. This is normal behavior and
107 * should not trigger an error return.
112 add_wait_queue(&hdev
->req_wait_q
, &wait
);
113 set_current_state(TASK_INTERRUPTIBLE
);
115 schedule_timeout(timeout
);
117 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
119 if (signal_pending(current
))
122 switch (hdev
->req_status
) {
124 err
= -bt_to_errno(hdev
->req_result
);
127 case HCI_REQ_CANCELED
:
128 err
= -hdev
->req_result
;
136 hdev
->req_status
= hdev
->req_result
= 0;
138 BT_DBG("%s end: err %d", hdev
->name
, err
);
143 static int hci_req_sync(struct hci_dev
*hdev
,
144 void (*req
)(struct hci_request
*req
,
146 unsigned long opt
, __u32 timeout
)
150 if (!test_bit(HCI_UP
, &hdev
->flags
))
153 /* Serialize all requests */
155 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
156 hci_req_unlock(hdev
);
161 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
163 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
166 set_bit(HCI_RESET
, &req
->hdev
->flags
);
167 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
170 static void bredr_init(struct hci_request
*req
)
172 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
174 /* Read Local Supported Features */
175 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
177 /* Read Local Version */
178 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
180 /* Read BD Address */
181 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
184 static void amp_init(struct hci_request
*req
)
186 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
188 /* Read Local Version */
189 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
191 /* Read Local AMP Info */
192 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
194 /* Read Data Blk size */
195 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
198 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
200 struct hci_dev
*hdev
= req
->hdev
;
201 struct hci_request init_req
;
204 BT_DBG("%s %ld", hdev
->name
, opt
);
206 /* Driver initialization */
208 hci_req_init(&init_req
, hdev
);
210 /* Special commands */
211 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
212 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
213 skb
->dev
= (void *) hdev
;
215 if (skb_queue_empty(&init_req
.cmd_q
))
216 bt_cb(skb
)->req
.start
= true;
218 skb_queue_tail(&init_req
.cmd_q
, skb
);
220 skb_queue_purge(&hdev
->driver_init
);
222 hci_req_run(&init_req
, NULL
);
225 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
226 hci_reset_req(req
, 0);
228 switch (hdev
->dev_type
) {
238 BT_ERR("Unknown device type %d", hdev
->dev_type
);
243 static void bredr_setup(struct hci_request
*req
)
245 struct hci_cp_delete_stored_link_key cp
;
249 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
250 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
252 /* Read Class of Device */
253 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
255 /* Read Local Name */
256 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
258 /* Read Voice Setting */
259 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
261 /* Clear Event Filters */
262 flt_type
= HCI_FLT_CLEAR_ALL
;
263 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
265 /* Connection accept timeout ~20 secs */
266 param
= __constant_cpu_to_le16(0x7d00);
267 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
269 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
270 cp
.delete_all
= 0x01;
271 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
274 static void le_setup(struct hci_request
*req
)
276 /* Read LE Buffer Size */
277 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
279 /* Read LE Local Supported Features */
280 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
282 /* Read LE Advertising Channel TX Power */
283 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
285 /* Read LE White List Size */
286 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
288 /* Read LE Supported States */
289 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
292 static u8
hci_get_inquiry_mode(struct hci_dev
*hdev
)
294 if (lmp_ext_inq_capable(hdev
))
297 if (lmp_inq_rssi_capable(hdev
))
300 if (hdev
->manufacturer
== 11 && hdev
->hci_rev
== 0x00 &&
301 hdev
->lmp_subver
== 0x0757)
304 if (hdev
->manufacturer
== 15) {
305 if (hdev
->hci_rev
== 0x03 && hdev
->lmp_subver
== 0x6963)
307 if (hdev
->hci_rev
== 0x09 && hdev
->lmp_subver
== 0x6963)
309 if (hdev
->hci_rev
== 0x00 && hdev
->lmp_subver
== 0x6965)
313 if (hdev
->manufacturer
== 31 && hdev
->hci_rev
== 0x2005 &&
314 hdev
->lmp_subver
== 0x1805)
320 static void hci_setup_inquiry_mode(struct hci_request
*req
)
324 mode
= hci_get_inquiry_mode(req
->hdev
);
326 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
329 static void hci_setup_event_mask(struct hci_request
*req
)
331 struct hci_dev
*hdev
= req
->hdev
;
333 /* The second byte is 0xff instead of 0x9f (two reserved bits
334 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
337 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
339 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
340 * any event mask for pre 1.2 devices.
342 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
345 if (lmp_bredr_capable(hdev
)) {
346 events
[4] |= 0x01; /* Flow Specification Complete */
347 events
[4] |= 0x02; /* Inquiry Result with RSSI */
348 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
349 events
[5] |= 0x08; /* Synchronous Connection Complete */
350 events
[5] |= 0x10; /* Synchronous Connection Changed */
353 if (lmp_inq_rssi_capable(hdev
))
354 events
[4] |= 0x02; /* Inquiry Result with RSSI */
356 if (lmp_sniffsubr_capable(hdev
))
357 events
[5] |= 0x20; /* Sniff Subrating */
359 if (lmp_pause_enc_capable(hdev
))
360 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
362 if (lmp_ext_inq_capable(hdev
))
363 events
[5] |= 0x40; /* Extended Inquiry Result */
365 if (lmp_no_flush_capable(hdev
))
366 events
[7] |= 0x01; /* Enhanced Flush Complete */
368 if (lmp_lsto_capable(hdev
))
369 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
371 if (lmp_ssp_capable(hdev
)) {
372 events
[6] |= 0x01; /* IO Capability Request */
373 events
[6] |= 0x02; /* IO Capability Response */
374 events
[6] |= 0x04; /* User Confirmation Request */
375 events
[6] |= 0x08; /* User Passkey Request */
376 events
[6] |= 0x10; /* Remote OOB Data Request */
377 events
[6] |= 0x20; /* Simple Pairing Complete */
378 events
[7] |= 0x04; /* User Passkey Notification */
379 events
[7] |= 0x08; /* Keypress Notification */
380 events
[7] |= 0x10; /* Remote Host Supported
381 * Features Notification
385 if (lmp_le_capable(hdev
))
386 events
[7] |= 0x20; /* LE Meta-Event */
388 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
390 if (lmp_le_capable(hdev
)) {
391 memset(events
, 0, sizeof(events
));
393 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
,
394 sizeof(events
), events
);
398 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
400 struct hci_dev
*hdev
= req
->hdev
;
402 if (lmp_bredr_capable(hdev
))
405 if (lmp_le_capable(hdev
))
408 hci_setup_event_mask(req
);
410 if (hdev
->hci_ver
> BLUETOOTH_VER_1_1
)
411 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
413 if (lmp_ssp_capable(hdev
)) {
414 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
416 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
417 sizeof(mode
), &mode
);
419 struct hci_cp_write_eir cp
;
421 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
422 memset(&cp
, 0, sizeof(cp
));
424 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
428 if (lmp_inq_rssi_capable(hdev
))
429 hci_setup_inquiry_mode(req
);
431 if (lmp_inq_tx_pwr_capable(hdev
))
432 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
434 if (lmp_ext_feat_capable(hdev
)) {
435 struct hci_cp_read_local_ext_features cp
;
438 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
442 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
)) {
444 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
449 static void hci_setup_link_policy(struct hci_request
*req
)
451 struct hci_dev
*hdev
= req
->hdev
;
452 struct hci_cp_write_def_link_policy cp
;
455 if (lmp_rswitch_capable(hdev
))
456 link_policy
|= HCI_LP_RSWITCH
;
457 if (lmp_hold_capable(hdev
))
458 link_policy
|= HCI_LP_HOLD
;
459 if (lmp_sniff_capable(hdev
))
460 link_policy
|= HCI_LP_SNIFF
;
461 if (lmp_park_capable(hdev
))
462 link_policy
|= HCI_LP_PARK
;
464 cp
.policy
= cpu_to_le16(link_policy
);
465 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
468 static void hci_set_le_support(struct hci_request
*req
)
470 struct hci_dev
*hdev
= req
->hdev
;
471 struct hci_cp_write_le_host_supported cp
;
473 memset(&cp
, 0, sizeof(cp
));
475 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
477 cp
.simul
= lmp_le_br_capable(hdev
);
480 if (cp
.le
!= lmp_host_le_capable(hdev
))
481 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
485 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
487 struct hci_dev
*hdev
= req
->hdev
;
489 if (hdev
->commands
[5] & 0x10)
490 hci_setup_link_policy(req
);
492 if (lmp_le_capable(hdev
))
493 hci_set_le_support(req
);
496 static int __hci_init(struct hci_dev
*hdev
)
500 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
504 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
505 * BR/EDR/LE type controllers. AMP controllers only need the
508 if (hdev
->dev_type
!= HCI_BREDR
)
511 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
515 return __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
518 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
522 BT_DBG("%s %x", req
->hdev
->name
, scan
);
524 /* Inquiry and Page scans */
525 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
528 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
532 BT_DBG("%s %x", req
->hdev
->name
, auth
);
535 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
538 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
542 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
545 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
548 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
550 __le16 policy
= cpu_to_le16(opt
);
552 BT_DBG("%s %x", req
->hdev
->name
, policy
);
554 /* Default link policy */
555 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
558 /* Get HCI device by index.
559 * Device is held on return. */
560 struct hci_dev
*hci_dev_get(int index
)
562 struct hci_dev
*hdev
= NULL
, *d
;
569 read_lock(&hci_dev_list_lock
);
570 list_for_each_entry(d
, &hci_dev_list
, list
) {
571 if (d
->id
== index
) {
572 hdev
= hci_dev_hold(d
);
576 read_unlock(&hci_dev_list_lock
);
580 /* ---- Inquiry support ---- */
582 bool hci_discovery_active(struct hci_dev
*hdev
)
584 struct discovery_state
*discov
= &hdev
->discovery
;
586 switch (discov
->state
) {
587 case DISCOVERY_FINDING
:
588 case DISCOVERY_RESOLVING
:
596 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
598 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
600 if (hdev
->discovery
.state
== state
)
604 case DISCOVERY_STOPPED
:
605 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
606 mgmt_discovering(hdev
, 0);
608 case DISCOVERY_STARTING
:
610 case DISCOVERY_FINDING
:
611 mgmt_discovering(hdev
, 1);
613 case DISCOVERY_RESOLVING
:
615 case DISCOVERY_STOPPING
:
619 hdev
->discovery
.state
= state
;
622 static void inquiry_cache_flush(struct hci_dev
*hdev
)
624 struct discovery_state
*cache
= &hdev
->discovery
;
625 struct inquiry_entry
*p
, *n
;
627 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
632 INIT_LIST_HEAD(&cache
->unknown
);
633 INIT_LIST_HEAD(&cache
->resolve
);
636 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
639 struct discovery_state
*cache
= &hdev
->discovery
;
640 struct inquiry_entry
*e
;
642 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
644 list_for_each_entry(e
, &cache
->all
, all
) {
645 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
652 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
655 struct discovery_state
*cache
= &hdev
->discovery
;
656 struct inquiry_entry
*e
;
658 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
660 list_for_each_entry(e
, &cache
->unknown
, list
) {
661 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
668 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
672 struct discovery_state
*cache
= &hdev
->discovery
;
673 struct inquiry_entry
*e
;
675 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
677 list_for_each_entry(e
, &cache
->resolve
, list
) {
678 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
680 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
687 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
688 struct inquiry_entry
*ie
)
690 struct discovery_state
*cache
= &hdev
->discovery
;
691 struct list_head
*pos
= &cache
->resolve
;
692 struct inquiry_entry
*p
;
696 list_for_each_entry(p
, &cache
->resolve
, list
) {
697 if (p
->name_state
!= NAME_PENDING
&&
698 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
703 list_add(&ie
->list
, pos
);
706 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
707 bool name_known
, bool *ssp
)
709 struct discovery_state
*cache
= &hdev
->discovery
;
710 struct inquiry_entry
*ie
;
712 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
714 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
717 *ssp
= data
->ssp_mode
;
719 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
721 if (ie
->data
.ssp_mode
&& ssp
)
724 if (ie
->name_state
== NAME_NEEDED
&&
725 data
->rssi
!= ie
->data
.rssi
) {
726 ie
->data
.rssi
= data
->rssi
;
727 hci_inquiry_cache_update_resolve(hdev
, ie
);
733 /* Entry not in the cache. Add new one. */
734 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
738 list_add(&ie
->all
, &cache
->all
);
741 ie
->name_state
= NAME_KNOWN
;
743 ie
->name_state
= NAME_NOT_KNOWN
;
744 list_add(&ie
->list
, &cache
->unknown
);
748 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
749 ie
->name_state
!= NAME_PENDING
) {
750 ie
->name_state
= NAME_KNOWN
;
754 memcpy(&ie
->data
, data
, sizeof(*data
));
755 ie
->timestamp
= jiffies
;
756 cache
->timestamp
= jiffies
;
758 if (ie
->name_state
== NAME_NOT_KNOWN
)
764 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
766 struct discovery_state
*cache
= &hdev
->discovery
;
767 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
768 struct inquiry_entry
*e
;
771 list_for_each_entry(e
, &cache
->all
, all
) {
772 struct inquiry_data
*data
= &e
->data
;
777 bacpy(&info
->bdaddr
, &data
->bdaddr
);
778 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
779 info
->pscan_period_mode
= data
->pscan_period_mode
;
780 info
->pscan_mode
= data
->pscan_mode
;
781 memcpy(info
->dev_class
, data
->dev_class
, 3);
782 info
->clock_offset
= data
->clock_offset
;
788 BT_DBG("cache %p, copied %d", cache
, copied
);
792 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
794 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
795 struct hci_dev
*hdev
= req
->hdev
;
796 struct hci_cp_inquiry cp
;
798 BT_DBG("%s", hdev
->name
);
800 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
804 memcpy(&cp
.lap
, &ir
->lap
, 3);
805 cp
.length
= ir
->length
;
806 cp
.num_rsp
= ir
->num_rsp
;
807 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
810 int hci_inquiry(void __user
*arg
)
812 __u8 __user
*ptr
= arg
;
813 struct hci_inquiry_req ir
;
814 struct hci_dev
*hdev
;
815 int err
= 0, do_inquiry
= 0, max_rsp
;
819 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
822 hdev
= hci_dev_get(ir
.dev_id
);
827 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
828 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
829 inquiry_cache_flush(hdev
);
832 hci_dev_unlock(hdev
);
834 timeo
= ir
.length
* msecs_to_jiffies(2000);
837 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
843 /* for unlimited number of responses we will use buffer with
846 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
848 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
849 * copy it to the user space.
851 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
858 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
859 hci_dev_unlock(hdev
);
861 BT_DBG("num_rsp %d", ir
.num_rsp
);
863 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
865 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
878 static u8
create_ad(struct hci_dev
*hdev
, u8
*ptr
)
880 u8 ad_len
= 0, flags
= 0;
883 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
884 flags
|= LE_AD_GENERAL
;
886 if (!lmp_bredr_capable(hdev
))
887 flags
|= LE_AD_NO_BREDR
;
889 if (lmp_le_br_capable(hdev
))
890 flags
|= LE_AD_SIM_LE_BREDR_CTRL
;
892 if (lmp_host_le_br_capable(hdev
))
893 flags
|= LE_AD_SIM_LE_BREDR_HOST
;
896 BT_DBG("adv flags 0x%02x", flags
);
906 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
908 ptr
[1] = EIR_TX_POWER
;
909 ptr
[2] = (u8
) hdev
->adv_tx_power
;
915 name_len
= strlen(hdev
->dev_name
);
917 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
919 if (name_len
> max_len
) {
921 ptr
[1] = EIR_NAME_SHORT
;
923 ptr
[1] = EIR_NAME_COMPLETE
;
925 ptr
[0] = name_len
+ 1;
927 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
929 ad_len
+= (name_len
+ 2);
930 ptr
+= (name_len
+ 2);
936 int hci_update_ad(struct hci_dev
*hdev
)
938 struct hci_cp_le_set_adv_data cp
;
944 if (!lmp_le_capable(hdev
)) {
949 memset(&cp
, 0, sizeof(cp
));
951 len
= create_ad(hdev
, cp
.data
);
953 if (hdev
->adv_data_len
== len
&&
954 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0) {
959 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
960 hdev
->adv_data_len
= len
;
963 err
= hci_send_cmd(hdev
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
966 hci_dev_unlock(hdev
);
971 /* ---- HCI ioctl helpers ---- */
973 int hci_dev_open(__u16 dev
)
975 struct hci_dev
*hdev
;
978 hdev
= hci_dev_get(dev
);
982 BT_DBG("%s %p", hdev
->name
, hdev
);
986 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
991 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
996 if (test_bit(HCI_UP
, &hdev
->flags
)) {
1001 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
1002 set_bit(HCI_RAW
, &hdev
->flags
);
1004 /* Treat all non BR/EDR controllers as raw devices if
1005 enable_hs is not set */
1006 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
1007 set_bit(HCI_RAW
, &hdev
->flags
);
1009 if (hdev
->open(hdev
)) {
1014 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
1015 atomic_set(&hdev
->cmd_cnt
, 1);
1016 set_bit(HCI_INIT
, &hdev
->flags
);
1017 ret
= __hci_init(hdev
);
1018 clear_bit(HCI_INIT
, &hdev
->flags
);
1023 set_bit(HCI_UP
, &hdev
->flags
);
1024 hci_notify(hdev
, HCI_DEV_UP
);
1025 hci_update_ad(hdev
);
1026 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
1027 mgmt_valid_hdev(hdev
)) {
1029 mgmt_powered(hdev
, 1);
1030 hci_dev_unlock(hdev
);
1033 /* Init failed, cleanup */
1034 flush_work(&hdev
->tx_work
);
1035 flush_work(&hdev
->cmd_work
);
1036 flush_work(&hdev
->rx_work
);
1038 skb_queue_purge(&hdev
->cmd_q
);
1039 skb_queue_purge(&hdev
->rx_q
);
1044 if (hdev
->sent_cmd
) {
1045 kfree_skb(hdev
->sent_cmd
);
1046 hdev
->sent_cmd
= NULL
;
1054 hci_req_unlock(hdev
);
1059 static int hci_dev_do_close(struct hci_dev
*hdev
)
1061 BT_DBG("%s %p", hdev
->name
, hdev
);
1063 cancel_work_sync(&hdev
->le_scan
);
1065 cancel_delayed_work(&hdev
->power_off
);
1067 hci_req_cancel(hdev
, ENODEV
);
1070 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1071 del_timer_sync(&hdev
->cmd_timer
);
1072 hci_req_unlock(hdev
);
1076 /* Flush RX and TX works */
1077 flush_work(&hdev
->tx_work
);
1078 flush_work(&hdev
->rx_work
);
1080 if (hdev
->discov_timeout
> 0) {
1081 cancel_delayed_work(&hdev
->discov_off
);
1082 hdev
->discov_timeout
= 0;
1083 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1086 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1087 cancel_delayed_work(&hdev
->service_cache
);
1089 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
1092 inquiry_cache_flush(hdev
);
1093 hci_conn_hash_flush(hdev
);
1094 hci_dev_unlock(hdev
);
1096 hci_notify(hdev
, HCI_DEV_DOWN
);
1102 skb_queue_purge(&hdev
->cmd_q
);
1103 atomic_set(&hdev
->cmd_cnt
, 1);
1104 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
1105 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
1106 set_bit(HCI_INIT
, &hdev
->flags
);
1107 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
1108 clear_bit(HCI_INIT
, &hdev
->flags
);
1111 /* flush cmd work */
1112 flush_work(&hdev
->cmd_work
);
1115 skb_queue_purge(&hdev
->rx_q
);
1116 skb_queue_purge(&hdev
->cmd_q
);
1117 skb_queue_purge(&hdev
->raw_q
);
1119 /* Drop last sent command */
1120 if (hdev
->sent_cmd
) {
1121 del_timer_sync(&hdev
->cmd_timer
);
1122 kfree_skb(hdev
->sent_cmd
);
1123 hdev
->sent_cmd
= NULL
;
1126 /* After this point our queues are empty
1127 * and no tasks are scheduled. */
1130 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
1131 mgmt_valid_hdev(hdev
)) {
1133 mgmt_powered(hdev
, 0);
1134 hci_dev_unlock(hdev
);
1140 /* Controller radio is available but is currently powered down */
1141 hdev
->amp_status
= 0;
1143 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1144 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1146 hci_req_unlock(hdev
);
1152 int hci_dev_close(__u16 dev
)
1154 struct hci_dev
*hdev
;
1157 hdev
= hci_dev_get(dev
);
1161 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1162 cancel_delayed_work(&hdev
->power_off
);
1164 err
= hci_dev_do_close(hdev
);
1170 int hci_dev_reset(__u16 dev
)
1172 struct hci_dev
*hdev
;
1175 hdev
= hci_dev_get(dev
);
1181 if (!test_bit(HCI_UP
, &hdev
->flags
))
1185 skb_queue_purge(&hdev
->rx_q
);
1186 skb_queue_purge(&hdev
->cmd_q
);
1189 inquiry_cache_flush(hdev
);
1190 hci_conn_hash_flush(hdev
);
1191 hci_dev_unlock(hdev
);
1196 atomic_set(&hdev
->cmd_cnt
, 1);
1197 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
1199 if (!test_bit(HCI_RAW
, &hdev
->flags
))
1200 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
1203 hci_req_unlock(hdev
);
1208 int hci_dev_reset_stat(__u16 dev
)
1210 struct hci_dev
*hdev
;
1213 hdev
= hci_dev_get(dev
);
1217 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1224 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
1226 struct hci_dev
*hdev
;
1227 struct hci_dev_req dr
;
1230 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
1233 hdev
= hci_dev_get(dr
.dev_id
);
1239 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1244 if (!lmp_encrypt_capable(hdev
)) {
1249 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
1250 /* Auth must be enabled first */
1251 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1257 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
1262 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
1267 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
1271 case HCISETLINKMODE
:
1272 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
1273 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
1277 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1281 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1282 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1286 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1287 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1299 int hci_get_dev_list(void __user
*arg
)
1301 struct hci_dev
*hdev
;
1302 struct hci_dev_list_req
*dl
;
1303 struct hci_dev_req
*dr
;
1304 int n
= 0, size
, err
;
1307 if (get_user(dev_num
, (__u16 __user
*) arg
))
1310 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
1313 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
1315 dl
= kzalloc(size
, GFP_KERNEL
);
1321 read_lock(&hci_dev_list_lock
);
1322 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1323 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1324 cancel_delayed_work(&hdev
->power_off
);
1326 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1327 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1329 (dr
+ n
)->dev_id
= hdev
->id
;
1330 (dr
+ n
)->dev_opt
= hdev
->flags
;
1335 read_unlock(&hci_dev_list_lock
);
1338 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1340 err
= copy_to_user(arg
, dl
, size
);
1343 return err
? -EFAULT
: 0;
1346 int hci_get_dev_info(void __user
*arg
)
1348 struct hci_dev
*hdev
;
1349 struct hci_dev_info di
;
1352 if (copy_from_user(&di
, arg
, sizeof(di
)))
1355 hdev
= hci_dev_get(di
.dev_id
);
1359 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1360 cancel_delayed_work_sync(&hdev
->power_off
);
1362 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1363 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1365 strcpy(di
.name
, hdev
->name
);
1366 di
.bdaddr
= hdev
->bdaddr
;
1367 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1368 di
.flags
= hdev
->flags
;
1369 di
.pkt_type
= hdev
->pkt_type
;
1370 if (lmp_bredr_capable(hdev
)) {
1371 di
.acl_mtu
= hdev
->acl_mtu
;
1372 di
.acl_pkts
= hdev
->acl_pkts
;
1373 di
.sco_mtu
= hdev
->sco_mtu
;
1374 di
.sco_pkts
= hdev
->sco_pkts
;
1376 di
.acl_mtu
= hdev
->le_mtu
;
1377 di
.acl_pkts
= hdev
->le_pkts
;
1381 di
.link_policy
= hdev
->link_policy
;
1382 di
.link_mode
= hdev
->link_mode
;
1384 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1385 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1387 if (copy_to_user(arg
, &di
, sizeof(di
)))
1395 /* ---- Interface to HCI drivers ---- */
1397 static int hci_rfkill_set_block(void *data
, bool blocked
)
1399 struct hci_dev
*hdev
= data
;
1401 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1406 hci_dev_do_close(hdev
);
1411 static const struct rfkill_ops hci_rfkill_ops
= {
1412 .set_block
= hci_rfkill_set_block
,
1415 static void hci_power_on(struct work_struct
*work
)
1417 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1419 BT_DBG("%s", hdev
->name
);
1421 if (hci_dev_open(hdev
->id
) < 0)
1424 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1425 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1426 HCI_AUTO_OFF_TIMEOUT
);
1428 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1429 mgmt_index_added(hdev
);
1432 static void hci_power_off(struct work_struct
*work
)
1434 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1437 BT_DBG("%s", hdev
->name
);
1439 hci_dev_do_close(hdev
);
1442 static void hci_discov_off(struct work_struct
*work
)
1444 struct hci_dev
*hdev
;
1445 u8 scan
= SCAN_PAGE
;
1447 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1449 BT_DBG("%s", hdev
->name
);
1453 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1455 hdev
->discov_timeout
= 0;
1457 hci_dev_unlock(hdev
);
1460 int hci_uuids_clear(struct hci_dev
*hdev
)
1462 struct bt_uuid
*uuid
, *tmp
;
1464 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
1465 list_del(&uuid
->list
);
1472 int hci_link_keys_clear(struct hci_dev
*hdev
)
1474 struct list_head
*p
, *n
;
1476 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1477 struct link_key
*key
;
1479 key
= list_entry(p
, struct link_key
, list
);
1488 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1490 struct smp_ltk
*k
, *tmp
;
1492 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1500 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1504 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1505 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1511 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1512 u8 key_type
, u8 old_key_type
)
1515 if (key_type
< 0x03)
1518 /* Debug keys are insecure so don't store them persistently */
1519 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1522 /* Changed combination key and there's no previous one */
1523 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1526 /* Security mode 3 case */
1530 /* Neither local nor remote side had no-bonding as requirement */
1531 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1534 /* Local side had dedicated bonding as requirement */
1535 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1538 /* Remote side had dedicated bonding as requirement */
1539 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1542 /* If none of the above criteria match, then don't store the key
1547 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1551 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1552 if (k
->ediv
!= ediv
||
1553 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1562 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1567 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1568 if (addr_type
== k
->bdaddr_type
&&
1569 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1575 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1576 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1578 struct link_key
*key
, *old_key
;
1582 old_key
= hci_find_link_key(hdev
, bdaddr
);
1584 old_key_type
= old_key
->type
;
1587 old_key_type
= conn
? conn
->key_type
: 0xff;
1588 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1591 list_add(&key
->list
, &hdev
->link_keys
);
1594 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1596 /* Some buggy controller combinations generate a changed
1597 * combination key for legacy pairing even when there's no
1599 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1600 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1601 type
= HCI_LK_COMBINATION
;
1603 conn
->key_type
= type
;
1606 bacpy(&key
->bdaddr
, bdaddr
);
1607 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1608 key
->pin_len
= pin_len
;
1610 if (type
== HCI_LK_CHANGED_COMBINATION
)
1611 key
->type
= old_key_type
;
1618 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1620 mgmt_new_link_key(hdev
, key
, persistent
);
1623 conn
->flush_key
= !persistent
;
1628 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1629 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1632 struct smp_ltk
*key
, *old_key
;
1634 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1637 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1641 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1644 list_add(&key
->list
, &hdev
->long_term_keys
);
1647 bacpy(&key
->bdaddr
, bdaddr
);
1648 key
->bdaddr_type
= addr_type
;
1649 memcpy(key
->val
, tk
, sizeof(key
->val
));
1650 key
->authenticated
= authenticated
;
1652 key
->enc_size
= enc_size
;
1654 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1659 if (type
& HCI_SMP_LTK
)
1660 mgmt_new_ltk(hdev
, key
, 1);
1665 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1667 struct link_key
*key
;
1669 key
= hci_find_link_key(hdev
, bdaddr
);
1673 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1675 list_del(&key
->list
);
1681 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1683 struct smp_ltk
*k
, *tmp
;
1685 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1686 if (bacmp(bdaddr
, &k
->bdaddr
))
1689 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1698 /* HCI command timer function */
1699 static void hci_cmd_timeout(unsigned long arg
)
1701 struct hci_dev
*hdev
= (void *) arg
;
1703 if (hdev
->sent_cmd
) {
1704 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
1705 u16 opcode
= __le16_to_cpu(sent
->opcode
);
1707 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
1709 BT_ERR("%s command tx timeout", hdev
->name
);
1712 atomic_set(&hdev
->cmd_cnt
, 1);
1713 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1716 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1719 struct oob_data
*data
;
1721 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1722 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1728 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1730 struct oob_data
*data
;
1732 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1736 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1738 list_del(&data
->list
);
1744 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1746 struct oob_data
*data
, *n
;
1748 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1749 list_del(&data
->list
);
1756 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1759 struct oob_data
*data
;
1761 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1764 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1768 bacpy(&data
->bdaddr
, bdaddr
);
1769 list_add(&data
->list
, &hdev
->remote_oob_data
);
1772 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1773 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1775 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1780 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1782 struct bdaddr_list
*b
;
1784 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1785 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1791 int hci_blacklist_clear(struct hci_dev
*hdev
)
1793 struct list_head
*p
, *n
;
1795 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1796 struct bdaddr_list
*b
;
1798 b
= list_entry(p
, struct bdaddr_list
, list
);
1807 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1809 struct bdaddr_list
*entry
;
1811 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1814 if (hci_blacklist_lookup(hdev
, bdaddr
))
1817 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1821 bacpy(&entry
->bdaddr
, bdaddr
);
1823 list_add(&entry
->list
, &hdev
->blacklist
);
1825 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1828 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1830 struct bdaddr_list
*entry
;
1832 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1833 return hci_blacklist_clear(hdev
);
1835 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1839 list_del(&entry
->list
);
1842 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1845 static void le_scan_param_req(struct hci_request
*req
, unsigned long opt
)
1847 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1848 struct hci_cp_le_set_scan_param cp
;
1850 memset(&cp
, 0, sizeof(cp
));
1851 cp
.type
= param
->type
;
1852 cp
.interval
= cpu_to_le16(param
->interval
);
1853 cp
.window
= cpu_to_le16(param
->window
);
1855 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
1858 static void le_scan_enable_req(struct hci_request
*req
, unsigned long opt
)
1860 struct hci_cp_le_set_scan_enable cp
;
1862 memset(&cp
, 0, sizeof(cp
));
1866 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1869 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1870 u16 window
, int timeout
)
1872 long timeo
= msecs_to_jiffies(3000);
1873 struct le_scan_params param
;
1876 BT_DBG("%s", hdev
->name
);
1878 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1879 return -EINPROGRESS
;
1882 param
.interval
= interval
;
1883 param
.window
= window
;
1887 err
= __hci_req_sync(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1890 err
= __hci_req_sync(hdev
, le_scan_enable_req
, 0, timeo
);
1892 hci_req_unlock(hdev
);
1897 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
1898 msecs_to_jiffies(timeout
));
1903 int hci_cancel_le_scan(struct hci_dev
*hdev
)
1905 BT_DBG("%s", hdev
->name
);
1907 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1910 if (cancel_delayed_work(&hdev
->le_scan_disable
)) {
1911 struct hci_cp_le_set_scan_enable cp
;
1913 /* Send HCI command to disable LE Scan */
1914 memset(&cp
, 0, sizeof(cp
));
1915 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1921 static void le_scan_disable_work(struct work_struct
*work
)
1923 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1924 le_scan_disable
.work
);
1925 struct hci_cp_le_set_scan_enable cp
;
1927 BT_DBG("%s", hdev
->name
);
1929 memset(&cp
, 0, sizeof(cp
));
1931 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1934 static void le_scan_work(struct work_struct
*work
)
1936 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1937 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1939 BT_DBG("%s", hdev
->name
);
1941 hci_do_le_scan(hdev
, param
->type
, param
->interval
, param
->window
,
1945 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1948 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1950 BT_DBG("%s", hdev
->name
);
1952 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
1955 if (work_busy(&hdev
->le_scan
))
1956 return -EINPROGRESS
;
1959 param
->interval
= interval
;
1960 param
->window
= window
;
1961 param
->timeout
= timeout
;
1963 queue_work(system_long_wq
, &hdev
->le_scan
);
1968 /* Alloc HCI device */
1969 struct hci_dev
*hci_alloc_dev(void)
1971 struct hci_dev
*hdev
;
1973 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1977 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1978 hdev
->esco_type
= (ESCO_HV1
);
1979 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1980 hdev
->io_capability
= 0x03; /* No Input No Output */
1981 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
1982 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
1984 hdev
->sniff_max_interval
= 800;
1985 hdev
->sniff_min_interval
= 80;
1987 mutex_init(&hdev
->lock
);
1988 mutex_init(&hdev
->req_lock
);
1990 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1991 INIT_LIST_HEAD(&hdev
->blacklist
);
1992 INIT_LIST_HEAD(&hdev
->uuids
);
1993 INIT_LIST_HEAD(&hdev
->link_keys
);
1994 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1995 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1996 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
1998 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1999 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
2000 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
2001 INIT_WORK(&hdev
->power_on
, hci_power_on
);
2002 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
2004 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
2005 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
2006 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
2008 skb_queue_head_init(&hdev
->driver_init
);
2009 skb_queue_head_init(&hdev
->rx_q
);
2010 skb_queue_head_init(&hdev
->cmd_q
);
2011 skb_queue_head_init(&hdev
->raw_q
);
2013 init_waitqueue_head(&hdev
->req_wait_q
);
2015 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
2017 hci_init_sysfs(hdev
);
2018 discovery_init(hdev
);
2022 EXPORT_SYMBOL(hci_alloc_dev
);
2024 /* Free HCI device */
2025 void hci_free_dev(struct hci_dev
*hdev
)
2027 skb_queue_purge(&hdev
->driver_init
);
2029 /* will free via device release */
2030 put_device(&hdev
->dev
);
2032 EXPORT_SYMBOL(hci_free_dev
);
2034 /* Register HCI device */
2035 int hci_register_dev(struct hci_dev
*hdev
)
2039 if (!hdev
->open
|| !hdev
->close
)
2042 /* Do not allow HCI_AMP devices to register at index 0,
2043 * so the index can be used as the AMP controller ID.
2045 switch (hdev
->dev_type
) {
2047 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
2050 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
2059 sprintf(hdev
->name
, "hci%d", id
);
2062 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2064 write_lock(&hci_dev_list_lock
);
2065 list_add(&hdev
->list
, &hci_dev_list
);
2066 write_unlock(&hci_dev_list_lock
);
2068 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
2070 if (!hdev
->workqueue
) {
2075 hdev
->req_workqueue
= alloc_workqueue(hdev
->name
,
2076 WQ_HIGHPRI
| WQ_UNBOUND
|
2078 if (!hdev
->req_workqueue
) {
2079 destroy_workqueue(hdev
->workqueue
);
2084 error
= hci_add_sysfs(hdev
);
2088 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
2089 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
2092 if (rfkill_register(hdev
->rfkill
) < 0) {
2093 rfkill_destroy(hdev
->rfkill
);
2094 hdev
->rfkill
= NULL
;
2098 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
2100 if (hdev
->dev_type
!= HCI_AMP
)
2101 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2103 hci_notify(hdev
, HCI_DEV_REG
);
2106 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
2111 destroy_workqueue(hdev
->workqueue
);
2112 destroy_workqueue(hdev
->req_workqueue
);
2114 ida_simple_remove(&hci_index_ida
, hdev
->id
);
2115 write_lock(&hci_dev_list_lock
);
2116 list_del(&hdev
->list
);
2117 write_unlock(&hci_dev_list_lock
);
2121 EXPORT_SYMBOL(hci_register_dev
);
2123 /* Unregister HCI device */
2124 void hci_unregister_dev(struct hci_dev
*hdev
)
2128 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2130 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
2134 write_lock(&hci_dev_list_lock
);
2135 list_del(&hdev
->list
);
2136 write_unlock(&hci_dev_list_lock
);
2138 hci_dev_do_close(hdev
);
2140 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
2141 kfree_skb(hdev
->reassembly
[i
]);
2143 cancel_work_sync(&hdev
->power_on
);
2145 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
2146 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
2148 mgmt_index_removed(hdev
);
2149 hci_dev_unlock(hdev
);
2152 /* mgmt_index_removed should take care of emptying the
2154 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
2156 hci_notify(hdev
, HCI_DEV_UNREG
);
2159 rfkill_unregister(hdev
->rfkill
);
2160 rfkill_destroy(hdev
->rfkill
);
2163 hci_del_sysfs(hdev
);
2165 destroy_workqueue(hdev
->workqueue
);
2166 destroy_workqueue(hdev
->req_workqueue
);
2169 hci_blacklist_clear(hdev
);
2170 hci_uuids_clear(hdev
);
2171 hci_link_keys_clear(hdev
);
2172 hci_smp_ltks_clear(hdev
);
2173 hci_remote_oob_data_clear(hdev
);
2174 hci_dev_unlock(hdev
);
2178 ida_simple_remove(&hci_index_ida
, id
);
2180 EXPORT_SYMBOL(hci_unregister_dev
);
2182 /* Suspend HCI device */
2183 int hci_suspend_dev(struct hci_dev
*hdev
)
2185 hci_notify(hdev
, HCI_DEV_SUSPEND
);
2188 EXPORT_SYMBOL(hci_suspend_dev
);
2190 /* Resume HCI device */
2191 int hci_resume_dev(struct hci_dev
*hdev
)
2193 hci_notify(hdev
, HCI_DEV_RESUME
);
2196 EXPORT_SYMBOL(hci_resume_dev
);
2198 /* Receive frame from HCI drivers */
2199 int hci_recv_frame(struct sk_buff
*skb
)
2201 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2202 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
2203 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
2209 bt_cb(skb
)->incoming
= 1;
2212 __net_timestamp(skb
);
2214 skb_queue_tail(&hdev
->rx_q
, skb
);
2215 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
2219 EXPORT_SYMBOL(hci_recv_frame
);
2221 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
2222 int count
, __u8 index
)
2227 struct sk_buff
*skb
;
2228 struct bt_skb_cb
*scb
;
2230 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
2231 index
>= NUM_REASSEMBLY
)
2234 skb
= hdev
->reassembly
[index
];
2238 case HCI_ACLDATA_PKT
:
2239 len
= HCI_MAX_FRAME_SIZE
;
2240 hlen
= HCI_ACL_HDR_SIZE
;
2243 len
= HCI_MAX_EVENT_SIZE
;
2244 hlen
= HCI_EVENT_HDR_SIZE
;
2246 case HCI_SCODATA_PKT
:
2247 len
= HCI_MAX_SCO_SIZE
;
2248 hlen
= HCI_SCO_HDR_SIZE
;
2252 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2256 scb
= (void *) skb
->cb
;
2258 scb
->pkt_type
= type
;
2260 skb
->dev
= (void *) hdev
;
2261 hdev
->reassembly
[index
] = skb
;
2265 scb
= (void *) skb
->cb
;
2266 len
= min_t(uint
, scb
->expect
, count
);
2268 memcpy(skb_put(skb
, len
), data
, len
);
2277 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
2278 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
2279 scb
->expect
= h
->plen
;
2281 if (skb_tailroom(skb
) < scb
->expect
) {
2283 hdev
->reassembly
[index
] = NULL
;
2289 case HCI_ACLDATA_PKT
:
2290 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
2291 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
2292 scb
->expect
= __le16_to_cpu(h
->dlen
);
2294 if (skb_tailroom(skb
) < scb
->expect
) {
2296 hdev
->reassembly
[index
] = NULL
;
2302 case HCI_SCODATA_PKT
:
2303 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
2304 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
2305 scb
->expect
= h
->dlen
;
2307 if (skb_tailroom(skb
) < scb
->expect
) {
2309 hdev
->reassembly
[index
] = NULL
;
2316 if (scb
->expect
== 0) {
2317 /* Complete frame */
2319 bt_cb(skb
)->pkt_type
= type
;
2320 hci_recv_frame(skb
);
2322 hdev
->reassembly
[index
] = NULL
;
2330 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
2334 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
2338 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
2342 data
+= (count
- rem
);
2348 EXPORT_SYMBOL(hci_recv_fragment
);
2350 #define STREAM_REASSEMBLY 0
2352 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
2358 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
2361 struct { char type
; } *pkt
;
2363 /* Start of the frame */
2370 type
= bt_cb(skb
)->pkt_type
;
2372 rem
= hci_reassembly(hdev
, type
, data
, count
,
2377 data
+= (count
- rem
);
2383 EXPORT_SYMBOL(hci_recv_stream_fragment
);
2385 /* ---- Interface to upper protocols ---- */
2387 int hci_register_cb(struct hci_cb
*cb
)
2389 BT_DBG("%p name %s", cb
, cb
->name
);
2391 write_lock(&hci_cb_list_lock
);
2392 list_add(&cb
->list
, &hci_cb_list
);
2393 write_unlock(&hci_cb_list_lock
);
2397 EXPORT_SYMBOL(hci_register_cb
);
2399 int hci_unregister_cb(struct hci_cb
*cb
)
2401 BT_DBG("%p name %s", cb
, cb
->name
);
2403 write_lock(&hci_cb_list_lock
);
2404 list_del(&cb
->list
);
2405 write_unlock(&hci_cb_list_lock
);
2409 EXPORT_SYMBOL(hci_unregister_cb
);
2411 static int hci_send_frame(struct sk_buff
*skb
)
2413 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2420 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2423 __net_timestamp(skb
);
2425 /* Send copy to monitor */
2426 hci_send_to_monitor(hdev
, skb
);
2428 if (atomic_read(&hdev
->promisc
)) {
2429 /* Send copy to the sockets */
2430 hci_send_to_sock(hdev
, skb
);
2433 /* Get rid of skb owner, prior to sending to the driver. */
2436 return hdev
->send(skb
);
2439 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
2441 skb_queue_head_init(&req
->cmd_q
);
2445 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
2447 struct hci_dev
*hdev
= req
->hdev
;
2448 struct sk_buff
*skb
;
2449 unsigned long flags
;
2451 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
2453 /* Do not allow empty requests */
2454 if (skb_queue_empty(&req
->cmd_q
))
2457 skb
= skb_peek_tail(&req
->cmd_q
);
2458 bt_cb(skb
)->req
.complete
= complete
;
2460 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
2461 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
2462 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
2464 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2469 static struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
,
2470 u32 plen
, void *param
)
2472 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2473 struct hci_command_hdr
*hdr
;
2474 struct sk_buff
*skb
;
2476 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2480 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2481 hdr
->opcode
= cpu_to_le16(opcode
);
2485 memcpy(skb_put(skb
, plen
), param
, plen
);
2487 BT_DBG("skb len %d", skb
->len
);
2489 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2490 skb
->dev
= (void *) hdev
;
2495 /* Send HCI command */
2496 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2498 struct sk_buff
*skb
;
2500 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2502 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2504 BT_ERR("%s no memory for command", hdev
->name
);
2508 /* Stand-alone HCI commands must be flaged as
2509 * single-command requests.
2511 bt_cb(skb
)->req
.start
= true;
2513 skb_queue_tail(&hdev
->cmd_q
, skb
);
2514 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2519 /* Queue a command to an asynchronous HCI request */
2520 int hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
, void *param
)
2522 struct hci_dev
*hdev
= req
->hdev
;
2523 struct sk_buff
*skb
;
2525 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2527 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2529 BT_ERR("%s no memory for command", hdev
->name
);
2533 if (skb_queue_empty(&req
->cmd_q
))
2534 bt_cb(skb
)->req
.start
= true;
2536 skb_queue_tail(&req
->cmd_q
, skb
);
2541 /* Get data from the previously sent command */
2542 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2544 struct hci_command_hdr
*hdr
;
2546 if (!hdev
->sent_cmd
)
2549 hdr
= (void *) hdev
->sent_cmd
->data
;
2551 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2554 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
2556 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2560 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2562 struct hci_acl_hdr
*hdr
;
2565 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2566 skb_reset_transport_header(skb
);
2567 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2568 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2569 hdr
->dlen
= cpu_to_le16(len
);
2572 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
2573 struct sk_buff
*skb
, __u16 flags
)
2575 struct hci_conn
*conn
= chan
->conn
;
2576 struct hci_dev
*hdev
= conn
->hdev
;
2577 struct sk_buff
*list
;
2579 skb
->len
= skb_headlen(skb
);
2582 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2584 switch (hdev
->dev_type
) {
2586 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2589 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
2592 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
2596 list
= skb_shinfo(skb
)->frag_list
;
2598 /* Non fragmented */
2599 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2601 skb_queue_tail(queue
, skb
);
2604 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2606 skb_shinfo(skb
)->frag_list
= NULL
;
2608 /* Queue all fragments atomically */
2609 spin_lock(&queue
->lock
);
2611 __skb_queue_tail(queue
, skb
);
2613 flags
&= ~ACL_START
;
2616 skb
= list
; list
= list
->next
;
2618 skb
->dev
= (void *) hdev
;
2619 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2620 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2622 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2624 __skb_queue_tail(queue
, skb
);
2627 spin_unlock(&queue
->lock
);
2631 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2633 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2635 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2637 skb
->dev
= (void *) hdev
;
2639 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2641 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2645 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2647 struct hci_dev
*hdev
= conn
->hdev
;
2648 struct hci_sco_hdr hdr
;
2650 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2652 hdr
.handle
= cpu_to_le16(conn
->handle
);
2653 hdr
.dlen
= skb
->len
;
2655 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2656 skb_reset_transport_header(skb
);
2657 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2659 skb
->dev
= (void *) hdev
;
2660 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2662 skb_queue_tail(&conn
->data_q
, skb
);
2663 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2666 /* ---- HCI TX task (outgoing data) ---- */
2668 /* HCI Connection scheduler */
2669 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
2672 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2673 struct hci_conn
*conn
= NULL
, *c
;
2674 unsigned int num
= 0, min
= ~0;
2676 /* We don't have to lock device here. Connections are always
2677 * added and removed with TX task disabled. */
2681 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2682 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2685 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2690 if (c
->sent
< min
) {
2695 if (hci_conn_num(hdev
, type
) == num
)
2704 switch (conn
->type
) {
2706 cnt
= hdev
->acl_cnt
;
2710 cnt
= hdev
->sco_cnt
;
2713 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2717 BT_ERR("Unknown link type");
2725 BT_DBG("conn %p quote %d", conn
, *quote
);
2729 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2731 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2734 BT_ERR("%s link tx timeout", hdev
->name
);
2738 /* Kill stalled connections */
2739 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2740 if (c
->type
== type
&& c
->sent
) {
2741 BT_ERR("%s killing stalled connection %pMR",
2742 hdev
->name
, &c
->dst
);
2743 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
2750 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2753 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2754 struct hci_chan
*chan
= NULL
;
2755 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
2756 struct hci_conn
*conn
;
2757 int cnt
, q
, conn_num
= 0;
2759 BT_DBG("%s", hdev
->name
);
2763 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2764 struct hci_chan
*tmp
;
2766 if (conn
->type
!= type
)
2769 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2774 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2775 struct sk_buff
*skb
;
2777 if (skb_queue_empty(&tmp
->data_q
))
2780 skb
= skb_peek(&tmp
->data_q
);
2781 if (skb
->priority
< cur_prio
)
2784 if (skb
->priority
> cur_prio
) {
2787 cur_prio
= skb
->priority
;
2792 if (conn
->sent
< min
) {
2798 if (hci_conn_num(hdev
, type
) == conn_num
)
2807 switch (chan
->conn
->type
) {
2809 cnt
= hdev
->acl_cnt
;
2812 cnt
= hdev
->block_cnt
;
2816 cnt
= hdev
->sco_cnt
;
2819 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2823 BT_ERR("Unknown link type");
2828 BT_DBG("chan %p quote %d", chan
, *quote
);
2832 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2834 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2835 struct hci_conn
*conn
;
2838 BT_DBG("%s", hdev
->name
);
2842 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2843 struct hci_chan
*chan
;
2845 if (conn
->type
!= type
)
2848 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2853 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2854 struct sk_buff
*skb
;
2861 if (skb_queue_empty(&chan
->data_q
))
2864 skb
= skb_peek(&chan
->data_q
);
2865 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2868 skb
->priority
= HCI_PRIO_MAX
- 1;
2870 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2874 if (hci_conn_num(hdev
, type
) == num
)
2882 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2884 /* Calculate count of blocks used by this packet */
2885 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2888 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2890 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2891 /* ACL tx timeout must be longer than maximum
2892 * link supervision timeout (40.9 seconds) */
2893 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2894 HCI_ACL_TX_TIMEOUT
))
2895 hci_link_tx_to(hdev
, ACL_LINK
);
2899 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2901 unsigned int cnt
= hdev
->acl_cnt
;
2902 struct hci_chan
*chan
;
2903 struct sk_buff
*skb
;
2906 __check_timeout(hdev
, cnt
);
2908 while (hdev
->acl_cnt
&&
2909 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2910 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2911 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2912 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2913 skb
->len
, skb
->priority
);
2915 /* Stop if priority has changed */
2916 if (skb
->priority
< priority
)
2919 skb
= skb_dequeue(&chan
->data_q
);
2921 hci_conn_enter_active_mode(chan
->conn
,
2922 bt_cb(skb
)->force_active
);
2924 hci_send_frame(skb
);
2925 hdev
->acl_last_tx
= jiffies
;
2933 if (cnt
!= hdev
->acl_cnt
)
2934 hci_prio_recalculate(hdev
, ACL_LINK
);
2937 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
2939 unsigned int cnt
= hdev
->block_cnt
;
2940 struct hci_chan
*chan
;
2941 struct sk_buff
*skb
;
2945 __check_timeout(hdev
, cnt
);
2947 BT_DBG("%s", hdev
->name
);
2949 if (hdev
->dev_type
== HCI_AMP
)
2954 while (hdev
->block_cnt
> 0 &&
2955 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
2956 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2957 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2960 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2961 skb
->len
, skb
->priority
);
2963 /* Stop if priority has changed */
2964 if (skb
->priority
< priority
)
2967 skb
= skb_dequeue(&chan
->data_q
);
2969 blocks
= __get_blocks(hdev
, skb
);
2970 if (blocks
> hdev
->block_cnt
)
2973 hci_conn_enter_active_mode(chan
->conn
,
2974 bt_cb(skb
)->force_active
);
2976 hci_send_frame(skb
);
2977 hdev
->acl_last_tx
= jiffies
;
2979 hdev
->block_cnt
-= blocks
;
2982 chan
->sent
+= blocks
;
2983 chan
->conn
->sent
+= blocks
;
2987 if (cnt
!= hdev
->block_cnt
)
2988 hci_prio_recalculate(hdev
, type
);
2991 static void hci_sched_acl(struct hci_dev
*hdev
)
2993 BT_DBG("%s", hdev
->name
);
2995 /* No ACL link over BR/EDR controller */
2996 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
2999 /* No AMP link over AMP controller */
3000 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
3003 switch (hdev
->flow_ctl_mode
) {
3004 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
3005 hci_sched_acl_pkt(hdev
);
3008 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
3009 hci_sched_acl_blk(hdev
);
3015 static void hci_sched_sco(struct hci_dev
*hdev
)
3017 struct hci_conn
*conn
;
3018 struct sk_buff
*skb
;
3021 BT_DBG("%s", hdev
->name
);
3023 if (!hci_conn_num(hdev
, SCO_LINK
))
3026 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
3027 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3028 BT_DBG("skb %p len %d", skb
, skb
->len
);
3029 hci_send_frame(skb
);
3032 if (conn
->sent
== ~0)
3038 static void hci_sched_esco(struct hci_dev
*hdev
)
3040 struct hci_conn
*conn
;
3041 struct sk_buff
*skb
;
3044 BT_DBG("%s", hdev
->name
);
3046 if (!hci_conn_num(hdev
, ESCO_LINK
))
3049 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
3051 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3052 BT_DBG("skb %p len %d", skb
, skb
->len
);
3053 hci_send_frame(skb
);
3056 if (conn
->sent
== ~0)
3062 static void hci_sched_le(struct hci_dev
*hdev
)
3064 struct hci_chan
*chan
;
3065 struct sk_buff
*skb
;
3066 int quote
, cnt
, tmp
;
3068 BT_DBG("%s", hdev
->name
);
3070 if (!hci_conn_num(hdev
, LE_LINK
))
3073 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
3074 /* LE tx timeout must be longer than maximum
3075 * link supervision timeout (40.9 seconds) */
3076 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
3077 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
3078 hci_link_tx_to(hdev
, LE_LINK
);
3081 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
3083 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
3084 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3085 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3086 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3087 skb
->len
, skb
->priority
);
3089 /* Stop if priority has changed */
3090 if (skb
->priority
< priority
)
3093 skb
= skb_dequeue(&chan
->data_q
);
3095 hci_send_frame(skb
);
3096 hdev
->le_last_tx
= jiffies
;
3107 hdev
->acl_cnt
= cnt
;
3110 hci_prio_recalculate(hdev
, LE_LINK
);
3113 static void hci_tx_work(struct work_struct
*work
)
3115 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
3116 struct sk_buff
*skb
;
3118 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
3119 hdev
->sco_cnt
, hdev
->le_cnt
);
3121 /* Schedule queues and send stuff to HCI driver */
3123 hci_sched_acl(hdev
);
3125 hci_sched_sco(hdev
);
3127 hci_sched_esco(hdev
);
3131 /* Send next queued raw (unknown type) packet */
3132 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
3133 hci_send_frame(skb
);
3136 /* ----- HCI RX task (incoming data processing) ----- */
3138 /* ACL data packet */
3139 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3141 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
3142 struct hci_conn
*conn
;
3143 __u16 handle
, flags
;
3145 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
3147 handle
= __le16_to_cpu(hdr
->handle
);
3148 flags
= hci_flags(handle
);
3149 handle
= hci_handle(handle
);
3151 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
3154 hdev
->stat
.acl_rx
++;
3157 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3158 hci_dev_unlock(hdev
);
3161 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
3163 /* Send to upper protocol */
3164 l2cap_recv_acldata(conn
, skb
, flags
);
3167 BT_ERR("%s ACL packet for unknown connection handle %d",
3168 hdev
->name
, handle
);
3174 /* SCO data packet */
3175 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3177 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
3178 struct hci_conn
*conn
;
3181 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
3183 handle
= __le16_to_cpu(hdr
->handle
);
3185 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
3187 hdev
->stat
.sco_rx
++;
3190 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3191 hci_dev_unlock(hdev
);
3194 /* Send to upper protocol */
3195 sco_recv_scodata(conn
, skb
);
3198 BT_ERR("%s SCO packet for unknown connection handle %d",
3199 hdev
->name
, handle
);
3205 static bool hci_req_is_complete(struct hci_dev
*hdev
)
3207 struct sk_buff
*skb
;
3209 skb
= skb_peek(&hdev
->cmd_q
);
3213 return bt_cb(skb
)->req
.start
;
3216 static void hci_resend_last(struct hci_dev
*hdev
)
3218 struct hci_command_hdr
*sent
;
3219 struct sk_buff
*skb
;
3222 if (!hdev
->sent_cmd
)
3225 sent
= (void *) hdev
->sent_cmd
->data
;
3226 opcode
= __le16_to_cpu(sent
->opcode
);
3227 if (opcode
== HCI_OP_RESET
)
3230 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
3234 skb_queue_head(&hdev
->cmd_q
, skb
);
3235 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3238 void hci_req_cmd_complete(struct hci_dev
*hdev
, u16 opcode
, u8 status
)
3240 hci_req_complete_t req_complete
= NULL
;
3241 struct sk_buff
*skb
;
3242 unsigned long flags
;
3244 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
3246 /* If the completed command doesn't match the last one that was
3247 * sent we need to do special handling of it.
3249 if (!hci_sent_cmd_data(hdev
, opcode
)) {
3250 /* Some CSR based controllers generate a spontaneous
3251 * reset complete event during init and any pending
3252 * command will never be completed. In such a case we
3253 * need to resend whatever was the last sent
3256 if (test_bit(HCI_INIT
, &hdev
->flags
) && opcode
== HCI_OP_RESET
)
3257 hci_resend_last(hdev
);
3262 /* If the command succeeded and there's still more commands in
3263 * this request the request is not yet complete.
3265 if (!status
&& !hci_req_is_complete(hdev
))
3268 /* If this was the last command in a request the complete
3269 * callback would be found in hdev->sent_cmd instead of the
3270 * command queue (hdev->cmd_q).
3272 if (hdev
->sent_cmd
) {
3273 req_complete
= bt_cb(hdev
->sent_cmd
)->req
.complete
;
3278 /* Remove all pending commands belonging to this request */
3279 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
3280 while ((skb
= __skb_dequeue(&hdev
->cmd_q
))) {
3281 if (bt_cb(skb
)->req
.start
) {
3282 __skb_queue_head(&hdev
->cmd_q
, skb
);
3286 req_complete
= bt_cb(skb
)->req
.complete
;
3289 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
3293 req_complete(hdev
, status
);
3296 void hci_req_cmd_status(struct hci_dev
*hdev
, u16 opcode
, u8 status
)
3298 hci_req_complete_t req_complete
= NULL
;
3300 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
3303 hci_req_cmd_complete(hdev
, opcode
, status
);
3307 /* No need to handle success status if there are more commands */
3308 if (!hci_req_is_complete(hdev
))
3312 req_complete
= bt_cb(hdev
->sent_cmd
)->req
.complete
;
3314 /* If the request doesn't have a complete callback or there
3315 * are other commands/requests in the hdev queue we consider
3316 * this request as completed.
3318 if (!req_complete
|| !skb_queue_empty(&hdev
->cmd_q
))
3319 hci_req_cmd_complete(hdev
, opcode
, status
);
3322 static void hci_rx_work(struct work_struct
*work
)
3324 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
3325 struct sk_buff
*skb
;
3327 BT_DBG("%s", hdev
->name
);
3329 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
3330 /* Send copy to monitor */
3331 hci_send_to_monitor(hdev
, skb
);
3333 if (atomic_read(&hdev
->promisc
)) {
3334 /* Send copy to the sockets */
3335 hci_send_to_sock(hdev
, skb
);
3338 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
3343 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
3344 /* Don't process data packets in this states. */
3345 switch (bt_cb(skb
)->pkt_type
) {
3346 case HCI_ACLDATA_PKT
:
3347 case HCI_SCODATA_PKT
:
3354 switch (bt_cb(skb
)->pkt_type
) {
3356 BT_DBG("%s Event packet", hdev
->name
);
3357 hci_event_packet(hdev
, skb
);
3360 case HCI_ACLDATA_PKT
:
3361 BT_DBG("%s ACL data packet", hdev
->name
);
3362 hci_acldata_packet(hdev
, skb
);
3365 case HCI_SCODATA_PKT
:
3366 BT_DBG("%s SCO data packet", hdev
->name
);
3367 hci_scodata_packet(hdev
, skb
);
3377 static void hci_cmd_work(struct work_struct
*work
)
3379 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
3380 struct sk_buff
*skb
;
3382 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
3383 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
3385 /* Send queued commands */
3386 if (atomic_read(&hdev
->cmd_cnt
)) {
3387 skb
= skb_dequeue(&hdev
->cmd_q
);
3391 kfree_skb(hdev
->sent_cmd
);
3393 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
3394 if (hdev
->sent_cmd
) {
3395 atomic_dec(&hdev
->cmd_cnt
);
3396 hci_send_frame(skb
);
3397 if (test_bit(HCI_RESET
, &hdev
->flags
))
3398 del_timer(&hdev
->cmd_timer
);
3400 mod_timer(&hdev
->cmd_timer
,
3401 jiffies
+ HCI_CMD_TIMEOUT
);
3403 skb_queue_head(&hdev
->cmd_q
, skb
);
3404 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3409 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
3411 /* General inquiry access code (GIAC) */
3412 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3413 struct hci_cp_inquiry cp
;
3415 BT_DBG("%s", hdev
->name
);
3417 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
3418 return -EINPROGRESS
;
3420 inquiry_cache_flush(hdev
);
3422 memset(&cp
, 0, sizeof(cp
));
3423 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
3426 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
3429 int hci_cancel_inquiry(struct hci_dev
*hdev
)
3431 BT_DBG("%s", hdev
->name
);
3433 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
3436 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
3439 u8
bdaddr_to_le(u8 bdaddr_type
)
3441 switch (bdaddr_type
) {
3442 case BDADDR_LE_PUBLIC
:
3443 return ADDR_LE_DEV_PUBLIC
;
3446 /* Fallback to LE Random address type */
3447 return ADDR_LE_DEV_RANDOM
;