2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct
*work
);
37 static void hci_cmd_work(struct work_struct
*work
);
38 static void hci_tx_work(struct work_struct
*work
);
41 LIST_HEAD(hci_dev_list
);
42 DEFINE_RWLOCK(hci_dev_list_lock
);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list
);
46 DEFINE_RWLOCK(hci_cb_list_lock
);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida
);
51 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
)
62 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
64 if (hdev
->req_status
== HCI_REQ_PEND
) {
65 hdev
->req_result
= result
;
66 hdev
->req_status
= HCI_REQ_DONE
;
67 wake_up_interruptible(&hdev
->req_wait_q
);
71 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
73 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
75 if (hdev
->req_status
== HCI_REQ_PEND
) {
76 hdev
->req_result
= err
;
77 hdev
->req_status
= HCI_REQ_CANCELED
;
78 wake_up_interruptible(&hdev
->req_wait_q
);
82 /* Execute request and wait for completion. */
83 static int __hci_req_sync(struct hci_dev
*hdev
,
84 void (*func
)(struct hci_request
*req
,
86 unsigned long opt
, __u32 timeout
)
88 struct hci_request req
;
89 DECLARE_WAITQUEUE(wait
, current
);
92 BT_DBG("%s start", hdev
->name
);
94 hci_req_init(&req
, hdev
);
96 hdev
->req_status
= HCI_REQ_PEND
;
100 err
= hci_req_run(&req
, hci_req_sync_complete
);
102 hdev
->req_status
= 0;
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
115 add_wait_queue(&hdev
->req_wait_q
, &wait
);
116 set_current_state(TASK_INTERRUPTIBLE
);
118 schedule_timeout(timeout
);
120 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
122 if (signal_pending(current
))
125 switch (hdev
->req_status
) {
127 err
= -bt_to_errno(hdev
->req_result
);
130 case HCI_REQ_CANCELED
:
131 err
= -hdev
->req_result
;
139 hdev
->req_status
= hdev
->req_result
= 0;
141 BT_DBG("%s end: err %d", hdev
->name
, err
);
146 static int hci_req_sync(struct hci_dev
*hdev
,
147 void (*req
)(struct hci_request
*req
,
149 unsigned long opt
, __u32 timeout
)
153 if (!test_bit(HCI_UP
, &hdev
->flags
))
156 /* Serialize all requests */
158 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
159 hci_req_unlock(hdev
);
164 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
166 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
169 set_bit(HCI_RESET
, &req
->hdev
->flags
);
170 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
173 static void bredr_init(struct hci_request
*req
)
175 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
177 /* Read Local Supported Features */
178 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
180 /* Read Local Version */
181 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
183 /* Read BD Address */
184 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
187 static void amp_init(struct hci_request
*req
)
189 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
191 /* Read Local Version */
192 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
194 /* Read Local AMP Info */
195 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
197 /* Read Data Blk size */
198 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
201 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
203 struct hci_dev
*hdev
= req
->hdev
;
204 struct hci_request init_req
;
207 BT_DBG("%s %ld", hdev
->name
, opt
);
209 /* Driver initialization */
211 hci_req_init(&init_req
, hdev
);
213 /* Special commands */
214 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
215 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
216 skb
->dev
= (void *) hdev
;
218 if (skb_queue_empty(&init_req
.cmd_q
))
219 bt_cb(skb
)->req
.start
= true;
221 skb_queue_tail(&init_req
.cmd_q
, skb
);
223 skb_queue_purge(&hdev
->driver_init
);
225 hci_req_run(&init_req
, NULL
);
228 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
229 hci_reset_req(req
, 0);
231 switch (hdev
->dev_type
) {
241 BT_ERR("Unknown device type %d", hdev
->dev_type
);
246 static void bredr_setup(struct hci_request
*req
)
248 struct hci_cp_delete_stored_link_key cp
;
252 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
253 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
255 /* Read Class of Device */
256 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
258 /* Read Local Name */
259 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
261 /* Read Voice Setting */
262 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
264 /* Clear Event Filters */
265 flt_type
= HCI_FLT_CLEAR_ALL
;
266 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
268 /* Connection accept timeout ~20 secs */
269 param
= __constant_cpu_to_le16(0x7d00);
270 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
272 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
273 cp
.delete_all
= 0x01;
274 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
277 static void le_setup(struct hci_request
*req
)
279 /* Read LE Buffer Size */
280 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
282 /* Read LE Local Supported Features */
283 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
285 /* Read LE Advertising Channel TX Power */
286 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
288 /* Read LE White List Size */
289 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
291 /* Read LE Supported States */
292 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
295 static u8
hci_get_inquiry_mode(struct hci_dev
*hdev
)
297 if (lmp_ext_inq_capable(hdev
))
300 if (lmp_inq_rssi_capable(hdev
))
303 if (hdev
->manufacturer
== 11 && hdev
->hci_rev
== 0x00 &&
304 hdev
->lmp_subver
== 0x0757)
307 if (hdev
->manufacturer
== 15) {
308 if (hdev
->hci_rev
== 0x03 && hdev
->lmp_subver
== 0x6963)
310 if (hdev
->hci_rev
== 0x09 && hdev
->lmp_subver
== 0x6963)
312 if (hdev
->hci_rev
== 0x00 && hdev
->lmp_subver
== 0x6965)
316 if (hdev
->manufacturer
== 31 && hdev
->hci_rev
== 0x2005 &&
317 hdev
->lmp_subver
== 0x1805)
323 static void hci_setup_inquiry_mode(struct hci_request
*req
)
327 mode
= hci_get_inquiry_mode(req
->hdev
);
329 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
332 static void hci_setup_event_mask(struct hci_request
*req
)
334 struct hci_dev
*hdev
= req
->hdev
;
336 /* The second byte is 0xff instead of 0x9f (two reserved bits
337 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
340 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
342 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
343 * any event mask for pre 1.2 devices.
345 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
348 if (lmp_bredr_capable(hdev
)) {
349 events
[4] |= 0x01; /* Flow Specification Complete */
350 events
[4] |= 0x02; /* Inquiry Result with RSSI */
351 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
352 events
[5] |= 0x08; /* Synchronous Connection Complete */
353 events
[5] |= 0x10; /* Synchronous Connection Changed */
356 if (lmp_inq_rssi_capable(hdev
))
357 events
[4] |= 0x02; /* Inquiry Result with RSSI */
359 if (lmp_sniffsubr_capable(hdev
))
360 events
[5] |= 0x20; /* Sniff Subrating */
362 if (lmp_pause_enc_capable(hdev
))
363 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
365 if (lmp_ext_inq_capable(hdev
))
366 events
[5] |= 0x40; /* Extended Inquiry Result */
368 if (lmp_no_flush_capable(hdev
))
369 events
[7] |= 0x01; /* Enhanced Flush Complete */
371 if (lmp_lsto_capable(hdev
))
372 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
374 if (lmp_ssp_capable(hdev
)) {
375 events
[6] |= 0x01; /* IO Capability Request */
376 events
[6] |= 0x02; /* IO Capability Response */
377 events
[6] |= 0x04; /* User Confirmation Request */
378 events
[6] |= 0x08; /* User Passkey Request */
379 events
[6] |= 0x10; /* Remote OOB Data Request */
380 events
[6] |= 0x20; /* Simple Pairing Complete */
381 events
[7] |= 0x04; /* User Passkey Notification */
382 events
[7] |= 0x08; /* Keypress Notification */
383 events
[7] |= 0x10; /* Remote Host Supported
384 * Features Notification
388 if (lmp_le_capable(hdev
))
389 events
[7] |= 0x20; /* LE Meta-Event */
391 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
393 if (lmp_le_capable(hdev
)) {
394 memset(events
, 0, sizeof(events
));
396 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
,
397 sizeof(events
), events
);
401 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
403 struct hci_dev
*hdev
= req
->hdev
;
405 if (lmp_bredr_capable(hdev
))
408 if (lmp_le_capable(hdev
))
411 hci_setup_event_mask(req
);
413 if (hdev
->hci_ver
> BLUETOOTH_VER_1_1
)
414 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
416 if (lmp_ssp_capable(hdev
)) {
417 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
419 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
420 sizeof(mode
), &mode
);
422 struct hci_cp_write_eir cp
;
424 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
425 memset(&cp
, 0, sizeof(cp
));
427 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
431 if (lmp_inq_rssi_capable(hdev
))
432 hci_setup_inquiry_mode(req
);
434 if (lmp_inq_tx_pwr_capable(hdev
))
435 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
437 if (lmp_ext_feat_capable(hdev
)) {
438 struct hci_cp_read_local_ext_features cp
;
441 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
445 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
)) {
447 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
452 static void hci_setup_link_policy(struct hci_request
*req
)
454 struct hci_dev
*hdev
= req
->hdev
;
455 struct hci_cp_write_def_link_policy cp
;
458 if (lmp_rswitch_capable(hdev
))
459 link_policy
|= HCI_LP_RSWITCH
;
460 if (lmp_hold_capable(hdev
))
461 link_policy
|= HCI_LP_HOLD
;
462 if (lmp_sniff_capable(hdev
))
463 link_policy
|= HCI_LP_SNIFF
;
464 if (lmp_park_capable(hdev
))
465 link_policy
|= HCI_LP_PARK
;
467 cp
.policy
= cpu_to_le16(link_policy
);
468 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
471 static void hci_set_le_support(struct hci_request
*req
)
473 struct hci_dev
*hdev
= req
->hdev
;
474 struct hci_cp_write_le_host_supported cp
;
476 memset(&cp
, 0, sizeof(cp
));
478 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
480 cp
.simul
= lmp_le_br_capable(hdev
);
483 if (cp
.le
!= lmp_host_le_capable(hdev
))
484 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
488 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
490 struct hci_dev
*hdev
= req
->hdev
;
492 if (hdev
->commands
[5] & 0x10)
493 hci_setup_link_policy(req
);
495 if (lmp_le_capable(hdev
)) {
496 hci_set_le_support(req
);
501 static int __hci_init(struct hci_dev
*hdev
)
505 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
509 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
510 * BR/EDR/LE type controllers. AMP controllers only need the
513 if (hdev
->dev_type
!= HCI_BREDR
)
516 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
520 return __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
523 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
527 BT_DBG("%s %x", req
->hdev
->name
, scan
);
529 /* Inquiry and Page scans */
530 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
533 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
537 BT_DBG("%s %x", req
->hdev
->name
, auth
);
540 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
543 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
547 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
550 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
553 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
555 __le16 policy
= cpu_to_le16(opt
);
557 BT_DBG("%s %x", req
->hdev
->name
, policy
);
559 /* Default link policy */
560 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
563 /* Get HCI device by index.
564 * Device is held on return. */
565 struct hci_dev
*hci_dev_get(int index
)
567 struct hci_dev
*hdev
= NULL
, *d
;
574 read_lock(&hci_dev_list_lock
);
575 list_for_each_entry(d
, &hci_dev_list
, list
) {
576 if (d
->id
== index
) {
577 hdev
= hci_dev_hold(d
);
581 read_unlock(&hci_dev_list_lock
);
585 /* ---- Inquiry support ---- */
587 bool hci_discovery_active(struct hci_dev
*hdev
)
589 struct discovery_state
*discov
= &hdev
->discovery
;
591 switch (discov
->state
) {
592 case DISCOVERY_FINDING
:
593 case DISCOVERY_RESOLVING
:
601 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
603 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
605 if (hdev
->discovery
.state
== state
)
609 case DISCOVERY_STOPPED
:
610 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
611 mgmt_discovering(hdev
, 0);
613 case DISCOVERY_STARTING
:
615 case DISCOVERY_FINDING
:
616 mgmt_discovering(hdev
, 1);
618 case DISCOVERY_RESOLVING
:
620 case DISCOVERY_STOPPING
:
624 hdev
->discovery
.state
= state
;
627 static void inquiry_cache_flush(struct hci_dev
*hdev
)
629 struct discovery_state
*cache
= &hdev
->discovery
;
630 struct inquiry_entry
*p
, *n
;
632 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
637 INIT_LIST_HEAD(&cache
->unknown
);
638 INIT_LIST_HEAD(&cache
->resolve
);
641 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
644 struct discovery_state
*cache
= &hdev
->discovery
;
645 struct inquiry_entry
*e
;
647 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
649 list_for_each_entry(e
, &cache
->all
, all
) {
650 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
657 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
660 struct discovery_state
*cache
= &hdev
->discovery
;
661 struct inquiry_entry
*e
;
663 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
665 list_for_each_entry(e
, &cache
->unknown
, list
) {
666 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
673 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
677 struct discovery_state
*cache
= &hdev
->discovery
;
678 struct inquiry_entry
*e
;
680 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
682 list_for_each_entry(e
, &cache
->resolve
, list
) {
683 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
685 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
692 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
693 struct inquiry_entry
*ie
)
695 struct discovery_state
*cache
= &hdev
->discovery
;
696 struct list_head
*pos
= &cache
->resolve
;
697 struct inquiry_entry
*p
;
701 list_for_each_entry(p
, &cache
->resolve
, list
) {
702 if (p
->name_state
!= NAME_PENDING
&&
703 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
708 list_add(&ie
->list
, pos
);
711 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
712 bool name_known
, bool *ssp
)
714 struct discovery_state
*cache
= &hdev
->discovery
;
715 struct inquiry_entry
*ie
;
717 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
719 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
722 *ssp
= data
->ssp_mode
;
724 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
726 if (ie
->data
.ssp_mode
&& ssp
)
729 if (ie
->name_state
== NAME_NEEDED
&&
730 data
->rssi
!= ie
->data
.rssi
) {
731 ie
->data
.rssi
= data
->rssi
;
732 hci_inquiry_cache_update_resolve(hdev
, ie
);
738 /* Entry not in the cache. Add new one. */
739 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
743 list_add(&ie
->all
, &cache
->all
);
746 ie
->name_state
= NAME_KNOWN
;
748 ie
->name_state
= NAME_NOT_KNOWN
;
749 list_add(&ie
->list
, &cache
->unknown
);
753 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
754 ie
->name_state
!= NAME_PENDING
) {
755 ie
->name_state
= NAME_KNOWN
;
759 memcpy(&ie
->data
, data
, sizeof(*data
));
760 ie
->timestamp
= jiffies
;
761 cache
->timestamp
= jiffies
;
763 if (ie
->name_state
== NAME_NOT_KNOWN
)
769 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
771 struct discovery_state
*cache
= &hdev
->discovery
;
772 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
773 struct inquiry_entry
*e
;
776 list_for_each_entry(e
, &cache
->all
, all
) {
777 struct inquiry_data
*data
= &e
->data
;
782 bacpy(&info
->bdaddr
, &data
->bdaddr
);
783 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
784 info
->pscan_period_mode
= data
->pscan_period_mode
;
785 info
->pscan_mode
= data
->pscan_mode
;
786 memcpy(info
->dev_class
, data
->dev_class
, 3);
787 info
->clock_offset
= data
->clock_offset
;
793 BT_DBG("cache %p, copied %d", cache
, copied
);
797 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
799 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
800 struct hci_dev
*hdev
= req
->hdev
;
801 struct hci_cp_inquiry cp
;
803 BT_DBG("%s", hdev
->name
);
805 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
809 memcpy(&cp
.lap
, &ir
->lap
, 3);
810 cp
.length
= ir
->length
;
811 cp
.num_rsp
= ir
->num_rsp
;
812 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
815 int hci_inquiry(void __user
*arg
)
817 __u8 __user
*ptr
= arg
;
818 struct hci_inquiry_req ir
;
819 struct hci_dev
*hdev
;
820 int err
= 0, do_inquiry
= 0, max_rsp
;
824 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
827 hdev
= hci_dev_get(ir
.dev_id
);
832 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
833 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
834 inquiry_cache_flush(hdev
);
837 hci_dev_unlock(hdev
);
839 timeo
= ir
.length
* msecs_to_jiffies(2000);
842 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
848 /* for unlimited number of responses we will use buffer with
851 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
853 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
854 * copy it to the user space.
856 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
863 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
864 hci_dev_unlock(hdev
);
866 BT_DBG("num_rsp %d", ir
.num_rsp
);
868 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
870 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
883 static u8
create_ad(struct hci_dev
*hdev
, u8
*ptr
)
885 u8 ad_len
= 0, flags
= 0;
888 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
889 flags
|= LE_AD_GENERAL
;
891 if (!lmp_bredr_capable(hdev
))
892 flags
|= LE_AD_NO_BREDR
;
894 if (lmp_le_br_capable(hdev
))
895 flags
|= LE_AD_SIM_LE_BREDR_CTRL
;
897 if (lmp_host_le_br_capable(hdev
))
898 flags
|= LE_AD_SIM_LE_BREDR_HOST
;
901 BT_DBG("adv flags 0x%02x", flags
);
911 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
913 ptr
[1] = EIR_TX_POWER
;
914 ptr
[2] = (u8
) hdev
->adv_tx_power
;
920 name_len
= strlen(hdev
->dev_name
);
922 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
924 if (name_len
> max_len
) {
926 ptr
[1] = EIR_NAME_SHORT
;
928 ptr
[1] = EIR_NAME_COMPLETE
;
930 ptr
[0] = name_len
+ 1;
932 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
934 ad_len
+= (name_len
+ 2);
935 ptr
+= (name_len
+ 2);
941 void hci_update_ad(struct hci_request
*req
)
943 struct hci_dev
*hdev
= req
->hdev
;
944 struct hci_cp_le_set_adv_data cp
;
947 if (!lmp_le_capable(hdev
))
950 memset(&cp
, 0, sizeof(cp
));
952 len
= create_ad(hdev
, cp
.data
);
954 if (hdev
->adv_data_len
== len
&&
955 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
958 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
959 hdev
->adv_data_len
= len
;
963 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
966 /* ---- HCI ioctl helpers ---- */
968 int hci_dev_open(__u16 dev
)
970 struct hci_dev
*hdev
;
973 hdev
= hci_dev_get(dev
);
977 BT_DBG("%s %p", hdev
->name
, hdev
);
981 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
986 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
991 if (test_bit(HCI_UP
, &hdev
->flags
)) {
996 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
997 set_bit(HCI_RAW
, &hdev
->flags
);
999 /* Treat all non BR/EDR controllers as raw devices if
1000 enable_hs is not set */
1001 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
1002 set_bit(HCI_RAW
, &hdev
->flags
);
1004 if (hdev
->open(hdev
)) {
1009 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
1010 atomic_set(&hdev
->cmd_cnt
, 1);
1011 set_bit(HCI_INIT
, &hdev
->flags
);
1012 ret
= __hci_init(hdev
);
1013 clear_bit(HCI_INIT
, &hdev
->flags
);
1018 set_bit(HCI_UP
, &hdev
->flags
);
1019 hci_notify(hdev
, HCI_DEV_UP
);
1020 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
1021 mgmt_valid_hdev(hdev
)) {
1023 mgmt_powered(hdev
, 1);
1024 hci_dev_unlock(hdev
);
1027 /* Init failed, cleanup */
1028 flush_work(&hdev
->tx_work
);
1029 flush_work(&hdev
->cmd_work
);
1030 flush_work(&hdev
->rx_work
);
1032 skb_queue_purge(&hdev
->cmd_q
);
1033 skb_queue_purge(&hdev
->rx_q
);
1038 if (hdev
->sent_cmd
) {
1039 kfree_skb(hdev
->sent_cmd
);
1040 hdev
->sent_cmd
= NULL
;
1048 hci_req_unlock(hdev
);
1053 static int hci_dev_do_close(struct hci_dev
*hdev
)
1055 BT_DBG("%s %p", hdev
->name
, hdev
);
1057 cancel_work_sync(&hdev
->le_scan
);
1059 cancel_delayed_work(&hdev
->power_off
);
1061 hci_req_cancel(hdev
, ENODEV
);
1064 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1065 del_timer_sync(&hdev
->cmd_timer
);
1066 hci_req_unlock(hdev
);
1070 /* Flush RX and TX works */
1071 flush_work(&hdev
->tx_work
);
1072 flush_work(&hdev
->rx_work
);
1074 if (hdev
->discov_timeout
> 0) {
1075 cancel_delayed_work(&hdev
->discov_off
);
1076 hdev
->discov_timeout
= 0;
1077 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1080 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1081 cancel_delayed_work(&hdev
->service_cache
);
1083 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
1086 inquiry_cache_flush(hdev
);
1087 hci_conn_hash_flush(hdev
);
1088 hci_dev_unlock(hdev
);
1090 hci_notify(hdev
, HCI_DEV_DOWN
);
1096 skb_queue_purge(&hdev
->cmd_q
);
1097 atomic_set(&hdev
->cmd_cnt
, 1);
1098 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
1099 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
1100 set_bit(HCI_INIT
, &hdev
->flags
);
1101 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
1102 clear_bit(HCI_INIT
, &hdev
->flags
);
1105 /* flush cmd work */
1106 flush_work(&hdev
->cmd_work
);
1109 skb_queue_purge(&hdev
->rx_q
);
1110 skb_queue_purge(&hdev
->cmd_q
);
1111 skb_queue_purge(&hdev
->raw_q
);
1113 /* Drop last sent command */
1114 if (hdev
->sent_cmd
) {
1115 del_timer_sync(&hdev
->cmd_timer
);
1116 kfree_skb(hdev
->sent_cmd
);
1117 hdev
->sent_cmd
= NULL
;
1120 /* After this point our queues are empty
1121 * and no tasks are scheduled. */
1126 hdev
->dev_flags
&= ~HCI_PERSISTENT_MASK
;
1128 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
1129 mgmt_valid_hdev(hdev
)) {
1131 mgmt_powered(hdev
, 0);
1132 hci_dev_unlock(hdev
);
1135 /* Controller radio is available but is currently powered down */
1136 hdev
->amp_status
= 0;
1138 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1139 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1141 hci_req_unlock(hdev
);
1147 int hci_dev_close(__u16 dev
)
1149 struct hci_dev
*hdev
;
1152 hdev
= hci_dev_get(dev
);
1156 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1157 cancel_delayed_work(&hdev
->power_off
);
1159 err
= hci_dev_do_close(hdev
);
1165 int hci_dev_reset(__u16 dev
)
1167 struct hci_dev
*hdev
;
1170 hdev
= hci_dev_get(dev
);
1176 if (!test_bit(HCI_UP
, &hdev
->flags
))
1180 skb_queue_purge(&hdev
->rx_q
);
1181 skb_queue_purge(&hdev
->cmd_q
);
1184 inquiry_cache_flush(hdev
);
1185 hci_conn_hash_flush(hdev
);
1186 hci_dev_unlock(hdev
);
1191 atomic_set(&hdev
->cmd_cnt
, 1);
1192 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
1194 if (!test_bit(HCI_RAW
, &hdev
->flags
))
1195 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
1198 hci_req_unlock(hdev
);
1203 int hci_dev_reset_stat(__u16 dev
)
1205 struct hci_dev
*hdev
;
1208 hdev
= hci_dev_get(dev
);
1212 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1219 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
1221 struct hci_dev
*hdev
;
1222 struct hci_dev_req dr
;
1225 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
1228 hdev
= hci_dev_get(dr
.dev_id
);
1234 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1239 if (!lmp_encrypt_capable(hdev
)) {
1244 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
1245 /* Auth must be enabled first */
1246 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1252 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
1257 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
1262 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
1266 case HCISETLINKMODE
:
1267 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
1268 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
1272 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1276 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1277 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1281 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1282 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1294 int hci_get_dev_list(void __user
*arg
)
1296 struct hci_dev
*hdev
;
1297 struct hci_dev_list_req
*dl
;
1298 struct hci_dev_req
*dr
;
1299 int n
= 0, size
, err
;
1302 if (get_user(dev_num
, (__u16 __user
*) arg
))
1305 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
1308 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
1310 dl
= kzalloc(size
, GFP_KERNEL
);
1316 read_lock(&hci_dev_list_lock
);
1317 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1318 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1319 cancel_delayed_work(&hdev
->power_off
);
1321 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1322 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1324 (dr
+ n
)->dev_id
= hdev
->id
;
1325 (dr
+ n
)->dev_opt
= hdev
->flags
;
1330 read_unlock(&hci_dev_list_lock
);
1333 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1335 err
= copy_to_user(arg
, dl
, size
);
1338 return err
? -EFAULT
: 0;
1341 int hci_get_dev_info(void __user
*arg
)
1343 struct hci_dev
*hdev
;
1344 struct hci_dev_info di
;
1347 if (copy_from_user(&di
, arg
, sizeof(di
)))
1350 hdev
= hci_dev_get(di
.dev_id
);
1354 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1355 cancel_delayed_work_sync(&hdev
->power_off
);
1357 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1358 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1360 strcpy(di
.name
, hdev
->name
);
1361 di
.bdaddr
= hdev
->bdaddr
;
1362 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1363 di
.flags
= hdev
->flags
;
1364 di
.pkt_type
= hdev
->pkt_type
;
1365 if (lmp_bredr_capable(hdev
)) {
1366 di
.acl_mtu
= hdev
->acl_mtu
;
1367 di
.acl_pkts
= hdev
->acl_pkts
;
1368 di
.sco_mtu
= hdev
->sco_mtu
;
1369 di
.sco_pkts
= hdev
->sco_pkts
;
1371 di
.acl_mtu
= hdev
->le_mtu
;
1372 di
.acl_pkts
= hdev
->le_pkts
;
1376 di
.link_policy
= hdev
->link_policy
;
1377 di
.link_mode
= hdev
->link_mode
;
1379 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1380 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1382 if (copy_to_user(arg
, &di
, sizeof(di
)))
1390 /* ---- Interface to HCI drivers ---- */
1392 static int hci_rfkill_set_block(void *data
, bool blocked
)
1394 struct hci_dev
*hdev
= data
;
1396 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1401 hci_dev_do_close(hdev
);
1406 static const struct rfkill_ops hci_rfkill_ops
= {
1407 .set_block
= hci_rfkill_set_block
,
1410 static void hci_power_on(struct work_struct
*work
)
1412 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1414 BT_DBG("%s", hdev
->name
);
1416 if (hci_dev_open(hdev
->id
) < 0)
1419 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1420 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1421 HCI_AUTO_OFF_TIMEOUT
);
1423 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1424 mgmt_index_added(hdev
);
1427 static void hci_power_off(struct work_struct
*work
)
1429 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1432 BT_DBG("%s", hdev
->name
);
1434 hci_dev_do_close(hdev
);
1437 static void hci_discov_off(struct work_struct
*work
)
1439 struct hci_dev
*hdev
;
1440 u8 scan
= SCAN_PAGE
;
1442 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1444 BT_DBG("%s", hdev
->name
);
1448 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1450 hdev
->discov_timeout
= 0;
1452 hci_dev_unlock(hdev
);
1455 int hci_uuids_clear(struct hci_dev
*hdev
)
1457 struct bt_uuid
*uuid
, *tmp
;
1459 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
1460 list_del(&uuid
->list
);
1467 int hci_link_keys_clear(struct hci_dev
*hdev
)
1469 struct list_head
*p
, *n
;
1471 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1472 struct link_key
*key
;
1474 key
= list_entry(p
, struct link_key
, list
);
1483 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1485 struct smp_ltk
*k
, *tmp
;
1487 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1495 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1499 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1500 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1506 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1507 u8 key_type
, u8 old_key_type
)
1510 if (key_type
< 0x03)
1513 /* Debug keys are insecure so don't store them persistently */
1514 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1517 /* Changed combination key and there's no previous one */
1518 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1521 /* Security mode 3 case */
1525 /* Neither local nor remote side had no-bonding as requirement */
1526 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1529 /* Local side had dedicated bonding as requirement */
1530 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1533 /* Remote side had dedicated bonding as requirement */
1534 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1537 /* If none of the above criteria match, then don't store the key
1542 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1546 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1547 if (k
->ediv
!= ediv
||
1548 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1557 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1562 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1563 if (addr_type
== k
->bdaddr_type
&&
1564 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1570 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1571 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1573 struct link_key
*key
, *old_key
;
1577 old_key
= hci_find_link_key(hdev
, bdaddr
);
1579 old_key_type
= old_key
->type
;
1582 old_key_type
= conn
? conn
->key_type
: 0xff;
1583 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1586 list_add(&key
->list
, &hdev
->link_keys
);
1589 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1591 /* Some buggy controller combinations generate a changed
1592 * combination key for legacy pairing even when there's no
1594 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1595 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1596 type
= HCI_LK_COMBINATION
;
1598 conn
->key_type
= type
;
1601 bacpy(&key
->bdaddr
, bdaddr
);
1602 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1603 key
->pin_len
= pin_len
;
1605 if (type
== HCI_LK_CHANGED_COMBINATION
)
1606 key
->type
= old_key_type
;
1613 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1615 mgmt_new_link_key(hdev
, key
, persistent
);
1618 conn
->flush_key
= !persistent
;
1623 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1624 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1627 struct smp_ltk
*key
, *old_key
;
1629 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1632 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1636 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1639 list_add(&key
->list
, &hdev
->long_term_keys
);
1642 bacpy(&key
->bdaddr
, bdaddr
);
1643 key
->bdaddr_type
= addr_type
;
1644 memcpy(key
->val
, tk
, sizeof(key
->val
));
1645 key
->authenticated
= authenticated
;
1647 key
->enc_size
= enc_size
;
1649 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1654 if (type
& HCI_SMP_LTK
)
1655 mgmt_new_ltk(hdev
, key
, 1);
1660 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1662 struct link_key
*key
;
1664 key
= hci_find_link_key(hdev
, bdaddr
);
1668 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1670 list_del(&key
->list
);
1676 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1678 struct smp_ltk
*k
, *tmp
;
1680 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1681 if (bacmp(bdaddr
, &k
->bdaddr
))
1684 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1693 /* HCI command timer function */
1694 static void hci_cmd_timeout(unsigned long arg
)
1696 struct hci_dev
*hdev
= (void *) arg
;
1698 if (hdev
->sent_cmd
) {
1699 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
1700 u16 opcode
= __le16_to_cpu(sent
->opcode
);
1702 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
1704 BT_ERR("%s command tx timeout", hdev
->name
);
1707 atomic_set(&hdev
->cmd_cnt
, 1);
1708 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1711 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1714 struct oob_data
*data
;
1716 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1717 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1723 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1725 struct oob_data
*data
;
1727 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1731 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1733 list_del(&data
->list
);
1739 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1741 struct oob_data
*data
, *n
;
1743 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1744 list_del(&data
->list
);
1751 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1754 struct oob_data
*data
;
1756 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1759 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1763 bacpy(&data
->bdaddr
, bdaddr
);
1764 list_add(&data
->list
, &hdev
->remote_oob_data
);
1767 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1768 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1770 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1775 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1777 struct bdaddr_list
*b
;
1779 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1780 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1786 int hci_blacklist_clear(struct hci_dev
*hdev
)
1788 struct list_head
*p
, *n
;
1790 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1791 struct bdaddr_list
*b
;
1793 b
= list_entry(p
, struct bdaddr_list
, list
);
1802 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1804 struct bdaddr_list
*entry
;
1806 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1809 if (hci_blacklist_lookup(hdev
, bdaddr
))
1812 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1816 bacpy(&entry
->bdaddr
, bdaddr
);
1818 list_add(&entry
->list
, &hdev
->blacklist
);
1820 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1823 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1825 struct bdaddr_list
*entry
;
1827 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1828 return hci_blacklist_clear(hdev
);
1830 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1834 list_del(&entry
->list
);
1837 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1840 static void le_scan_param_req(struct hci_request
*req
, unsigned long opt
)
1842 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1843 struct hci_cp_le_set_scan_param cp
;
1845 memset(&cp
, 0, sizeof(cp
));
1846 cp
.type
= param
->type
;
1847 cp
.interval
= cpu_to_le16(param
->interval
);
1848 cp
.window
= cpu_to_le16(param
->window
);
1850 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
1853 static void le_scan_enable_req(struct hci_request
*req
, unsigned long opt
)
1855 struct hci_cp_le_set_scan_enable cp
;
1857 memset(&cp
, 0, sizeof(cp
));
1861 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1864 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1865 u16 window
, int timeout
)
1867 long timeo
= msecs_to_jiffies(3000);
1868 struct le_scan_params param
;
1871 BT_DBG("%s", hdev
->name
);
1873 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1874 return -EINPROGRESS
;
1877 param
.interval
= interval
;
1878 param
.window
= window
;
1882 err
= __hci_req_sync(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1885 err
= __hci_req_sync(hdev
, le_scan_enable_req
, 0, timeo
);
1887 hci_req_unlock(hdev
);
1892 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
1893 msecs_to_jiffies(timeout
));
1898 int hci_cancel_le_scan(struct hci_dev
*hdev
)
1900 BT_DBG("%s", hdev
->name
);
1902 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1905 if (cancel_delayed_work(&hdev
->le_scan_disable
)) {
1906 struct hci_cp_le_set_scan_enable cp
;
1908 /* Send HCI command to disable LE Scan */
1909 memset(&cp
, 0, sizeof(cp
));
1910 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1916 static void le_scan_disable_work(struct work_struct
*work
)
1918 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1919 le_scan_disable
.work
);
1920 struct hci_cp_le_set_scan_enable cp
;
1922 BT_DBG("%s", hdev
->name
);
1924 memset(&cp
, 0, sizeof(cp
));
1926 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1929 static void le_scan_work(struct work_struct
*work
)
1931 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1932 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1934 BT_DBG("%s", hdev
->name
);
1936 hci_do_le_scan(hdev
, param
->type
, param
->interval
, param
->window
,
1940 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1943 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1945 BT_DBG("%s", hdev
->name
);
1947 if (test_bit(HCI_LE_PERIPHERAL
, &hdev
->dev_flags
))
1950 if (work_busy(&hdev
->le_scan
))
1951 return -EINPROGRESS
;
1954 param
->interval
= interval
;
1955 param
->window
= window
;
1956 param
->timeout
= timeout
;
1958 queue_work(system_long_wq
, &hdev
->le_scan
);
1963 /* Alloc HCI device */
1964 struct hci_dev
*hci_alloc_dev(void)
1966 struct hci_dev
*hdev
;
1968 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1972 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1973 hdev
->esco_type
= (ESCO_HV1
);
1974 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1975 hdev
->io_capability
= 0x03; /* No Input No Output */
1976 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
1977 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
1979 hdev
->sniff_max_interval
= 800;
1980 hdev
->sniff_min_interval
= 80;
1982 mutex_init(&hdev
->lock
);
1983 mutex_init(&hdev
->req_lock
);
1985 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1986 INIT_LIST_HEAD(&hdev
->blacklist
);
1987 INIT_LIST_HEAD(&hdev
->uuids
);
1988 INIT_LIST_HEAD(&hdev
->link_keys
);
1989 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1990 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1991 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
1993 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1994 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1995 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1996 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1997 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
1999 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
2000 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
2001 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
2003 skb_queue_head_init(&hdev
->driver_init
);
2004 skb_queue_head_init(&hdev
->rx_q
);
2005 skb_queue_head_init(&hdev
->cmd_q
);
2006 skb_queue_head_init(&hdev
->raw_q
);
2008 init_waitqueue_head(&hdev
->req_wait_q
);
2010 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
2012 hci_init_sysfs(hdev
);
2013 discovery_init(hdev
);
2017 EXPORT_SYMBOL(hci_alloc_dev
);
2019 /* Free HCI device */
2020 void hci_free_dev(struct hci_dev
*hdev
)
2022 skb_queue_purge(&hdev
->driver_init
);
2024 /* will free via device release */
2025 put_device(&hdev
->dev
);
2027 EXPORT_SYMBOL(hci_free_dev
);
2029 /* Register HCI device */
2030 int hci_register_dev(struct hci_dev
*hdev
)
2034 if (!hdev
->open
|| !hdev
->close
)
2037 /* Do not allow HCI_AMP devices to register at index 0,
2038 * so the index can be used as the AMP controller ID.
2040 switch (hdev
->dev_type
) {
2042 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
2045 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
2054 sprintf(hdev
->name
, "hci%d", id
);
2057 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2059 write_lock(&hci_dev_list_lock
);
2060 list_add(&hdev
->list
, &hci_dev_list
);
2061 write_unlock(&hci_dev_list_lock
);
2063 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
2065 if (!hdev
->workqueue
) {
2070 hdev
->req_workqueue
= alloc_workqueue(hdev
->name
,
2071 WQ_HIGHPRI
| WQ_UNBOUND
|
2073 if (!hdev
->req_workqueue
) {
2074 destroy_workqueue(hdev
->workqueue
);
2079 error
= hci_add_sysfs(hdev
);
2083 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
2084 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
2087 if (rfkill_register(hdev
->rfkill
) < 0) {
2088 rfkill_destroy(hdev
->rfkill
);
2089 hdev
->rfkill
= NULL
;
2093 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
2095 if (hdev
->dev_type
!= HCI_AMP
)
2096 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2098 hci_notify(hdev
, HCI_DEV_REG
);
2101 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
2106 destroy_workqueue(hdev
->workqueue
);
2107 destroy_workqueue(hdev
->req_workqueue
);
2109 ida_simple_remove(&hci_index_ida
, hdev
->id
);
2110 write_lock(&hci_dev_list_lock
);
2111 list_del(&hdev
->list
);
2112 write_unlock(&hci_dev_list_lock
);
2116 EXPORT_SYMBOL(hci_register_dev
);
2118 /* Unregister HCI device */
2119 void hci_unregister_dev(struct hci_dev
*hdev
)
2123 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2125 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
2129 write_lock(&hci_dev_list_lock
);
2130 list_del(&hdev
->list
);
2131 write_unlock(&hci_dev_list_lock
);
2133 hci_dev_do_close(hdev
);
2135 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
2136 kfree_skb(hdev
->reassembly
[i
]);
2138 cancel_work_sync(&hdev
->power_on
);
2140 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
2141 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
2143 mgmt_index_removed(hdev
);
2144 hci_dev_unlock(hdev
);
2147 /* mgmt_index_removed should take care of emptying the
2149 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
2151 hci_notify(hdev
, HCI_DEV_UNREG
);
2154 rfkill_unregister(hdev
->rfkill
);
2155 rfkill_destroy(hdev
->rfkill
);
2158 hci_del_sysfs(hdev
);
2160 destroy_workqueue(hdev
->workqueue
);
2161 destroy_workqueue(hdev
->req_workqueue
);
2164 hci_blacklist_clear(hdev
);
2165 hci_uuids_clear(hdev
);
2166 hci_link_keys_clear(hdev
);
2167 hci_smp_ltks_clear(hdev
);
2168 hci_remote_oob_data_clear(hdev
);
2169 hci_dev_unlock(hdev
);
2173 ida_simple_remove(&hci_index_ida
, id
);
2175 EXPORT_SYMBOL(hci_unregister_dev
);
2177 /* Suspend HCI device */
2178 int hci_suspend_dev(struct hci_dev
*hdev
)
2180 hci_notify(hdev
, HCI_DEV_SUSPEND
);
2183 EXPORT_SYMBOL(hci_suspend_dev
);
2185 /* Resume HCI device */
2186 int hci_resume_dev(struct hci_dev
*hdev
)
2188 hci_notify(hdev
, HCI_DEV_RESUME
);
2191 EXPORT_SYMBOL(hci_resume_dev
);
2193 /* Receive frame from HCI drivers */
2194 int hci_recv_frame(struct sk_buff
*skb
)
2196 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2197 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
2198 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
2204 bt_cb(skb
)->incoming
= 1;
2207 __net_timestamp(skb
);
2209 skb_queue_tail(&hdev
->rx_q
, skb
);
2210 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
2214 EXPORT_SYMBOL(hci_recv_frame
);
2216 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
2217 int count
, __u8 index
)
2222 struct sk_buff
*skb
;
2223 struct bt_skb_cb
*scb
;
2225 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
2226 index
>= NUM_REASSEMBLY
)
2229 skb
= hdev
->reassembly
[index
];
2233 case HCI_ACLDATA_PKT
:
2234 len
= HCI_MAX_FRAME_SIZE
;
2235 hlen
= HCI_ACL_HDR_SIZE
;
2238 len
= HCI_MAX_EVENT_SIZE
;
2239 hlen
= HCI_EVENT_HDR_SIZE
;
2241 case HCI_SCODATA_PKT
:
2242 len
= HCI_MAX_SCO_SIZE
;
2243 hlen
= HCI_SCO_HDR_SIZE
;
2247 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2251 scb
= (void *) skb
->cb
;
2253 scb
->pkt_type
= type
;
2255 skb
->dev
= (void *) hdev
;
2256 hdev
->reassembly
[index
] = skb
;
2260 scb
= (void *) skb
->cb
;
2261 len
= min_t(uint
, scb
->expect
, count
);
2263 memcpy(skb_put(skb
, len
), data
, len
);
2272 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
2273 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
2274 scb
->expect
= h
->plen
;
2276 if (skb_tailroom(skb
) < scb
->expect
) {
2278 hdev
->reassembly
[index
] = NULL
;
2284 case HCI_ACLDATA_PKT
:
2285 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
2286 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
2287 scb
->expect
= __le16_to_cpu(h
->dlen
);
2289 if (skb_tailroom(skb
) < scb
->expect
) {
2291 hdev
->reassembly
[index
] = NULL
;
2297 case HCI_SCODATA_PKT
:
2298 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
2299 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
2300 scb
->expect
= h
->dlen
;
2302 if (skb_tailroom(skb
) < scb
->expect
) {
2304 hdev
->reassembly
[index
] = NULL
;
2311 if (scb
->expect
== 0) {
2312 /* Complete frame */
2314 bt_cb(skb
)->pkt_type
= type
;
2315 hci_recv_frame(skb
);
2317 hdev
->reassembly
[index
] = NULL
;
2325 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
2329 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
2333 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
2337 data
+= (count
- rem
);
2343 EXPORT_SYMBOL(hci_recv_fragment
);
2345 #define STREAM_REASSEMBLY 0
2347 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
2353 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
2356 struct { char type
; } *pkt
;
2358 /* Start of the frame */
2365 type
= bt_cb(skb
)->pkt_type
;
2367 rem
= hci_reassembly(hdev
, type
, data
, count
,
2372 data
+= (count
- rem
);
2378 EXPORT_SYMBOL(hci_recv_stream_fragment
);
2380 /* ---- Interface to upper protocols ---- */
2382 int hci_register_cb(struct hci_cb
*cb
)
2384 BT_DBG("%p name %s", cb
, cb
->name
);
2386 write_lock(&hci_cb_list_lock
);
2387 list_add(&cb
->list
, &hci_cb_list
);
2388 write_unlock(&hci_cb_list_lock
);
2392 EXPORT_SYMBOL(hci_register_cb
);
2394 int hci_unregister_cb(struct hci_cb
*cb
)
2396 BT_DBG("%p name %s", cb
, cb
->name
);
2398 write_lock(&hci_cb_list_lock
);
2399 list_del(&cb
->list
);
2400 write_unlock(&hci_cb_list_lock
);
2404 EXPORT_SYMBOL(hci_unregister_cb
);
2406 static int hci_send_frame(struct sk_buff
*skb
)
2408 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2415 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2418 __net_timestamp(skb
);
2420 /* Send copy to monitor */
2421 hci_send_to_monitor(hdev
, skb
);
2423 if (atomic_read(&hdev
->promisc
)) {
2424 /* Send copy to the sockets */
2425 hci_send_to_sock(hdev
, skb
);
2428 /* Get rid of skb owner, prior to sending to the driver. */
2431 return hdev
->send(skb
);
2434 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
2436 skb_queue_head_init(&req
->cmd_q
);
2441 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
2443 struct hci_dev
*hdev
= req
->hdev
;
2444 struct sk_buff
*skb
;
2445 unsigned long flags
;
2447 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
2449 /* If an error occured during request building, remove all HCI
2450 * commands queued on the HCI request queue.
2453 skb_queue_purge(&req
->cmd_q
);
2457 /* Do not allow empty requests */
2458 if (skb_queue_empty(&req
->cmd_q
))
2461 skb
= skb_peek_tail(&req
->cmd_q
);
2462 bt_cb(skb
)->req
.complete
= complete
;
2464 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
2465 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
2466 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
2468 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2473 static struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
,
2474 u32 plen
, void *param
)
2476 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2477 struct hci_command_hdr
*hdr
;
2478 struct sk_buff
*skb
;
2480 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2484 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2485 hdr
->opcode
= cpu_to_le16(opcode
);
2489 memcpy(skb_put(skb
, plen
), param
, plen
);
2491 BT_DBG("skb len %d", skb
->len
);
2493 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2494 skb
->dev
= (void *) hdev
;
2499 /* Send HCI command */
2500 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2502 struct sk_buff
*skb
;
2504 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2506 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2508 BT_ERR("%s no memory for command", hdev
->name
);
2512 /* Stand-alone HCI commands must be flaged as
2513 * single-command requests.
2515 bt_cb(skb
)->req
.start
= true;
2517 skb_queue_tail(&hdev
->cmd_q
, skb
);
2518 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2523 /* Queue a command to an asynchronous HCI request */
2524 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
, void *param
)
2526 struct hci_dev
*hdev
= req
->hdev
;
2527 struct sk_buff
*skb
;
2529 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2531 /* If an error occured during request building, there is no point in
2532 * queueing the HCI command. We can simply return.
2537 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2539 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2540 hdev
->name
, opcode
);
2545 if (skb_queue_empty(&req
->cmd_q
))
2546 bt_cb(skb
)->req
.start
= true;
2548 skb_queue_tail(&req
->cmd_q
, skb
);
2551 /* Get data from the previously sent command */
2552 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2554 struct hci_command_hdr
*hdr
;
2556 if (!hdev
->sent_cmd
)
2559 hdr
= (void *) hdev
->sent_cmd
->data
;
2561 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2564 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
2566 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2570 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2572 struct hci_acl_hdr
*hdr
;
2575 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2576 skb_reset_transport_header(skb
);
2577 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2578 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2579 hdr
->dlen
= cpu_to_le16(len
);
2582 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
2583 struct sk_buff
*skb
, __u16 flags
)
2585 struct hci_conn
*conn
= chan
->conn
;
2586 struct hci_dev
*hdev
= conn
->hdev
;
2587 struct sk_buff
*list
;
2589 skb
->len
= skb_headlen(skb
);
2592 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2594 switch (hdev
->dev_type
) {
2596 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2599 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
2602 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
2606 list
= skb_shinfo(skb
)->frag_list
;
2608 /* Non fragmented */
2609 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2611 skb_queue_tail(queue
, skb
);
2614 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2616 skb_shinfo(skb
)->frag_list
= NULL
;
2618 /* Queue all fragments atomically */
2619 spin_lock(&queue
->lock
);
2621 __skb_queue_tail(queue
, skb
);
2623 flags
&= ~ACL_START
;
2626 skb
= list
; list
= list
->next
;
2628 skb
->dev
= (void *) hdev
;
2629 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2630 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2632 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2634 __skb_queue_tail(queue
, skb
);
2637 spin_unlock(&queue
->lock
);
2641 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2643 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2645 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2647 skb
->dev
= (void *) hdev
;
2649 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2651 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2655 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2657 struct hci_dev
*hdev
= conn
->hdev
;
2658 struct hci_sco_hdr hdr
;
2660 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2662 hdr
.handle
= cpu_to_le16(conn
->handle
);
2663 hdr
.dlen
= skb
->len
;
2665 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2666 skb_reset_transport_header(skb
);
2667 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2669 skb
->dev
= (void *) hdev
;
2670 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2672 skb_queue_tail(&conn
->data_q
, skb
);
2673 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2676 /* ---- HCI TX task (outgoing data) ---- */
2678 /* HCI Connection scheduler */
2679 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
2682 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2683 struct hci_conn
*conn
= NULL
, *c
;
2684 unsigned int num
= 0, min
= ~0;
2686 /* We don't have to lock device here. Connections are always
2687 * added and removed with TX task disabled. */
2691 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2692 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2695 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2700 if (c
->sent
< min
) {
2705 if (hci_conn_num(hdev
, type
) == num
)
2714 switch (conn
->type
) {
2716 cnt
= hdev
->acl_cnt
;
2720 cnt
= hdev
->sco_cnt
;
2723 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2727 BT_ERR("Unknown link type");
2735 BT_DBG("conn %p quote %d", conn
, *quote
);
2739 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2741 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2744 BT_ERR("%s link tx timeout", hdev
->name
);
2748 /* Kill stalled connections */
2749 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2750 if (c
->type
== type
&& c
->sent
) {
2751 BT_ERR("%s killing stalled connection %pMR",
2752 hdev
->name
, &c
->dst
);
2753 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
2760 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2763 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2764 struct hci_chan
*chan
= NULL
;
2765 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
2766 struct hci_conn
*conn
;
2767 int cnt
, q
, conn_num
= 0;
2769 BT_DBG("%s", hdev
->name
);
2773 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2774 struct hci_chan
*tmp
;
2776 if (conn
->type
!= type
)
2779 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2784 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2785 struct sk_buff
*skb
;
2787 if (skb_queue_empty(&tmp
->data_q
))
2790 skb
= skb_peek(&tmp
->data_q
);
2791 if (skb
->priority
< cur_prio
)
2794 if (skb
->priority
> cur_prio
) {
2797 cur_prio
= skb
->priority
;
2802 if (conn
->sent
< min
) {
2808 if (hci_conn_num(hdev
, type
) == conn_num
)
2817 switch (chan
->conn
->type
) {
2819 cnt
= hdev
->acl_cnt
;
2822 cnt
= hdev
->block_cnt
;
2826 cnt
= hdev
->sco_cnt
;
2829 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2833 BT_ERR("Unknown link type");
2838 BT_DBG("chan %p quote %d", chan
, *quote
);
2842 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2844 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2845 struct hci_conn
*conn
;
2848 BT_DBG("%s", hdev
->name
);
2852 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2853 struct hci_chan
*chan
;
2855 if (conn
->type
!= type
)
2858 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2863 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2864 struct sk_buff
*skb
;
2871 if (skb_queue_empty(&chan
->data_q
))
2874 skb
= skb_peek(&chan
->data_q
);
2875 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2878 skb
->priority
= HCI_PRIO_MAX
- 1;
2880 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2884 if (hci_conn_num(hdev
, type
) == num
)
2892 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2894 /* Calculate count of blocks used by this packet */
2895 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2898 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2900 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2901 /* ACL tx timeout must be longer than maximum
2902 * link supervision timeout (40.9 seconds) */
2903 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2904 HCI_ACL_TX_TIMEOUT
))
2905 hci_link_tx_to(hdev
, ACL_LINK
);
2909 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2911 unsigned int cnt
= hdev
->acl_cnt
;
2912 struct hci_chan
*chan
;
2913 struct sk_buff
*skb
;
2916 __check_timeout(hdev
, cnt
);
2918 while (hdev
->acl_cnt
&&
2919 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2920 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2921 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2922 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2923 skb
->len
, skb
->priority
);
2925 /* Stop if priority has changed */
2926 if (skb
->priority
< priority
)
2929 skb
= skb_dequeue(&chan
->data_q
);
2931 hci_conn_enter_active_mode(chan
->conn
,
2932 bt_cb(skb
)->force_active
);
2934 hci_send_frame(skb
);
2935 hdev
->acl_last_tx
= jiffies
;
2943 if (cnt
!= hdev
->acl_cnt
)
2944 hci_prio_recalculate(hdev
, ACL_LINK
);
2947 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
2949 unsigned int cnt
= hdev
->block_cnt
;
2950 struct hci_chan
*chan
;
2951 struct sk_buff
*skb
;
2955 __check_timeout(hdev
, cnt
);
2957 BT_DBG("%s", hdev
->name
);
2959 if (hdev
->dev_type
== HCI_AMP
)
2964 while (hdev
->block_cnt
> 0 &&
2965 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
2966 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2967 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2970 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2971 skb
->len
, skb
->priority
);
2973 /* Stop if priority has changed */
2974 if (skb
->priority
< priority
)
2977 skb
= skb_dequeue(&chan
->data_q
);
2979 blocks
= __get_blocks(hdev
, skb
);
2980 if (blocks
> hdev
->block_cnt
)
2983 hci_conn_enter_active_mode(chan
->conn
,
2984 bt_cb(skb
)->force_active
);
2986 hci_send_frame(skb
);
2987 hdev
->acl_last_tx
= jiffies
;
2989 hdev
->block_cnt
-= blocks
;
2992 chan
->sent
+= blocks
;
2993 chan
->conn
->sent
+= blocks
;
2997 if (cnt
!= hdev
->block_cnt
)
2998 hci_prio_recalculate(hdev
, type
);
3001 static void hci_sched_acl(struct hci_dev
*hdev
)
3003 BT_DBG("%s", hdev
->name
);
3005 /* No ACL link over BR/EDR controller */
3006 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
3009 /* No AMP link over AMP controller */
3010 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
3013 switch (hdev
->flow_ctl_mode
) {
3014 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
3015 hci_sched_acl_pkt(hdev
);
3018 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
3019 hci_sched_acl_blk(hdev
);
3025 static void hci_sched_sco(struct hci_dev
*hdev
)
3027 struct hci_conn
*conn
;
3028 struct sk_buff
*skb
;
3031 BT_DBG("%s", hdev
->name
);
3033 if (!hci_conn_num(hdev
, SCO_LINK
))
3036 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
3037 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3038 BT_DBG("skb %p len %d", skb
, skb
->len
);
3039 hci_send_frame(skb
);
3042 if (conn
->sent
== ~0)
3048 static void hci_sched_esco(struct hci_dev
*hdev
)
3050 struct hci_conn
*conn
;
3051 struct sk_buff
*skb
;
3054 BT_DBG("%s", hdev
->name
);
3056 if (!hci_conn_num(hdev
, ESCO_LINK
))
3059 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
3061 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3062 BT_DBG("skb %p len %d", skb
, skb
->len
);
3063 hci_send_frame(skb
);
3066 if (conn
->sent
== ~0)
3072 static void hci_sched_le(struct hci_dev
*hdev
)
3074 struct hci_chan
*chan
;
3075 struct sk_buff
*skb
;
3076 int quote
, cnt
, tmp
;
3078 BT_DBG("%s", hdev
->name
);
3080 if (!hci_conn_num(hdev
, LE_LINK
))
3083 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
3084 /* LE tx timeout must be longer than maximum
3085 * link supervision timeout (40.9 seconds) */
3086 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
3087 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
3088 hci_link_tx_to(hdev
, LE_LINK
);
3091 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
3093 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
3094 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3095 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3096 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3097 skb
->len
, skb
->priority
);
3099 /* Stop if priority has changed */
3100 if (skb
->priority
< priority
)
3103 skb
= skb_dequeue(&chan
->data_q
);
3105 hci_send_frame(skb
);
3106 hdev
->le_last_tx
= jiffies
;
3117 hdev
->acl_cnt
= cnt
;
3120 hci_prio_recalculate(hdev
, LE_LINK
);
3123 static void hci_tx_work(struct work_struct
*work
)
3125 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
3126 struct sk_buff
*skb
;
3128 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
3129 hdev
->sco_cnt
, hdev
->le_cnt
);
3131 /* Schedule queues and send stuff to HCI driver */
3133 hci_sched_acl(hdev
);
3135 hci_sched_sco(hdev
);
3137 hci_sched_esco(hdev
);
3141 /* Send next queued raw (unknown type) packet */
3142 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
3143 hci_send_frame(skb
);
3146 /* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet: resolve the connection handle and hand the payload
 * to L2CAP. Consumes @skb (either passed up the stack or freed).
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* Strip the ACL header; the remainder is the L2CAP payload. */
	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field packs the connection handle with the
	 * packet-boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only needs the lock; the conn pointer is used after the
	 * unlock, matching the upstream locking model for RX. */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* No owner for this skb - drop it. */
	kfree_skb(skb);
}
/* SCO data packet: resolve the connection handle and hand the payload
 * to the SCO layer. Consumes @skb (either passed up the stack or freed).
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	/* Strip the SCO header; the remainder is the audio payload. */
	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* No owner for this skb - drop it. */
	kfree_skb(skb);
}
3215 static bool hci_req_is_complete(struct hci_dev
*hdev
)
3217 struct sk_buff
*skb
;
3219 skb
= skb_peek(&hdev
->cmd_q
);
3223 return bt_cb(skb
)->req
.start
;
/* Re-queue a copy of the last sent command at the head of cmd_q and kick
 * the command worker. Used when a spontaneous event (see
 * hci_req_cmd_complete) means the pending command will never complete.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	/* Nothing in flight - nothing to resend. */
	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* HCI_Reset is the command the spontaneous event mimics; never
	 * retransmit it. */
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything else. */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
/* Handle completion of @opcode with @status: if this finishes the request
 * the command belonged to, purge the request's remaining queued commands
 * and invoke its completion callback.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command with req.start marks the start of the NEXT
		 * request: put it back and stop purging. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
/* Handle a Command Status event for @opcode. A failure always completes
 * the request; a success only completes it when no further commands of
 * the same request are pending.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* A non-zero status terminates the whole request immediately. */
	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
/* RX work handler (hdev->rx_work): drain hdev->rx_q, mirror each frame
 * to the monitor (and to raw sockets in promiscuous mode), then dispatch
 * by packet type. Every dequeued skb is consumed on all paths.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode user space owns the device; the stack does
		 * not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
/* CMD work handler (hdev->cmd_work): send the next queued HCI command if
 * the controller's command credit count (cmd_cnt) allows it. A clone of
 * the sent command is kept in hdev->sent_cmd so the completion event can
 * be matched against it later.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While a reset is pending no command timeout is
			 * armed; otherwise (re)arm the watchdog. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (OOM): put the command back and
			 * retry from the workqueue. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
/* Start a general inquiry (device discovery) lasting @length * 1.28s.
 * Returns -EINPROGRESS if an inquiry is already running, otherwise the
 * result of queueing the HCI_Inquiry command.
 */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	/* Discard results of any previous inquiry. */
	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
/* Cancel a running inquiry. Returns -EALREADY when no inquiry is active,
 * otherwise the result of queueing HCI_Inquiry_Cancel.
 */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
3449 u8
bdaddr_to_le(u8 bdaddr_type
)
3451 switch (bdaddr_type
) {
3452 case BDADDR_LE_PUBLIC
:
3453 return ADDR_LE_DEV_PUBLIC
;
3456 /* Fallback to LE Random address type */
3457 return ADDR_LE_DEV_RANDOM
;