Bluetooth: Rename LE_SCANNING_* macros
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69 }
70
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80 }
81
82 struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
83 {
84 struct hci_ev_cmd_complete *ev;
85 struct hci_event_hdr *hdr;
86 struct sk_buff *skb;
87
88 hci_dev_lock(hdev);
89
90 skb = hdev->recv_evt;
91 hdev->recv_evt = NULL;
92
93 hci_dev_unlock(hdev);
94
95 if (!skb)
96 return ERR_PTR(-ENODATA);
97
98 if (skb->len < sizeof(*hdr)) {
99 BT_ERR("Too short HCI event");
100 goto failed;
101 }
102
103 hdr = (void *) skb->data;
104 skb_pull(skb, HCI_EVENT_HDR_SIZE);
105
106 if (event) {
107 if (hdr->evt != event)
108 goto failed;
109 return skb;
110 }
111
112 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
113 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
114 goto failed;
115 }
116
117 if (skb->len < sizeof(*ev)) {
118 BT_ERR("Too short cmd_complete event");
119 goto failed;
120 }
121
122 ev = (void *) skb->data;
123 skb_pull(skb, sizeof(*ev));
124
125 if (opcode == __le16_to_cpu(ev->opcode))
126 return skb;
127
128 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
129 __le16_to_cpu(ev->opcode));
130
131 failed:
132 kfree_skb(skb);
133 return ERR_PTR(-ENODATA);
134 }
135
136 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
137 void *param, u8 event, u32 timeout)
138 {
139 DECLARE_WAITQUEUE(wait, current);
140 struct hci_request req;
141 int err = 0;
142
143 BT_DBG("%s", hdev->name);
144
145 hci_req_init(&req, hdev);
146
147 hci_req_add_ev(&req, opcode, plen, param, event);
148
149 hdev->req_status = HCI_REQ_PEND;
150
151 err = hci_req_run(&req, hci_req_sync_complete);
152 if (err < 0)
153 return ERR_PTR(err);
154
155 add_wait_queue(&hdev->req_wait_q, &wait);
156 set_current_state(TASK_INTERRUPTIBLE);
157
158 schedule_timeout(timeout);
159
160 remove_wait_queue(&hdev->req_wait_q, &wait);
161
162 if (signal_pending(current))
163 return ERR_PTR(-EINTR);
164
165 switch (hdev->req_status) {
166 case HCI_REQ_DONE:
167 err = -bt_to_errno(hdev->req_result);
168 break;
169
170 case HCI_REQ_CANCELED:
171 err = -hdev->req_result;
172 break;
173
174 default:
175 err = -ETIMEDOUT;
176 break;
177 }
178
179 hdev->req_status = hdev->req_result = 0;
180
181 BT_DBG("%s end: err %d", hdev->name, err);
182
183 if (err < 0)
184 return ERR_PTR(err);
185
186 return hci_get_cmd_complete(hdev, opcode, event);
187 }
188 EXPORT_SYMBOL(__hci_cmd_sync_ev);
189
190 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
191 void *param, u32 timeout)
192 {
193 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
194 }
195 EXPORT_SYMBOL(__hci_cmd_sync);
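
/*
 * A minimal usage sketch for the synchronous helpers above, e.g. from a
 * driver's setup callback where hdev->req_lock is already held. The
 * wrapper name is illustrative only; on success the returned skb holds
 * the Command Complete return parameters.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (void *) skb->data;
	BT_DBG("%s hci ver %u rev %u", hdev->name, rp->hci_ver,
	       __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}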
196
197 /* Execute request and wait for completion. */
198 static int __hci_req_sync(struct hci_dev *hdev,
199 void (*func)(struct hci_request *req,
200 unsigned long opt),
201 unsigned long opt, __u32 timeout)
202 {
203 struct hci_request req;
204 DECLARE_WAITQUEUE(wait, current);
205 int err = 0;
206
207 BT_DBG("%s start", hdev->name);
208
209 hci_req_init(&req, hdev);
210
211 hdev->req_status = HCI_REQ_PEND;
212
213 func(&req, opt);
214
215 err = hci_req_run(&req, hci_req_sync_complete);
216 if (err < 0) {
217 hdev->req_status = 0;
218
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
223 */
224 if (err == -ENODATA)
225 return 0;
226
227 return err;
228 }
229
230 add_wait_queue(&hdev->req_wait_q, &wait);
231 set_current_state(TASK_INTERRUPTIBLE);
232
233 schedule_timeout(timeout);
234
235 remove_wait_queue(&hdev->req_wait_q, &wait);
236
237 if (signal_pending(current))
238 return -EINTR;
239
240 switch (hdev->req_status) {
241 case HCI_REQ_DONE:
242 err = -bt_to_errno(hdev->req_result);
243 break;
244
245 case HCI_REQ_CANCELED:
246 err = -hdev->req_result;
247 break;
248
249 default:
250 err = -ETIMEDOUT;
251 break;
252 }
253
254 hdev->req_status = hdev->req_result = 0;
255
256 BT_DBG("%s end: err %d", hdev->name, err);
257
258 return err;
259 }
260
261 static int hci_req_sync(struct hci_dev *hdev,
262 void (*req)(struct hci_request *req,
263 unsigned long opt),
264 unsigned long opt, __u32 timeout)
265 {
266 int ret;
267
268 if (!test_bit(HCI_UP, &hdev->flags))
269 return -ENETDOWN;
270
271 /* Serialize all requests */
272 hci_req_lock(hdev);
273 ret = __hci_req_sync(hdev, req, opt, timeout);
274 hci_req_unlock(hdev);
275
276 return ret;
277 }
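
/*
 * The typical pattern for the helpers above: a small builder function adds
 * commands to the request and hci_req_sync() runs it to completion. The
 * example_write_le_host_* names below are illustrative only.
 */
static void example_write_le_host_req(struct hci_request *req,
				      unsigned long opt)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));
	cp.le = (u8) opt;

	hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}

static int example_write_le_host_sync(struct hci_dev *hdev, u8 enable)
{
	return hci_req_sync(hdev, example_write_le_host_req, enable,
			    HCI_INIT_TIMEOUT);
}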
278
279 static void hci_reset_req(struct hci_request *req, unsigned long opt)
280 {
281 BT_DBG("%s %ld", req->hdev->name, opt);
282
283 /* Reset device */
284 set_bit(HCI_RESET, &req->hdev->flags);
285 hci_req_add(req, HCI_OP_RESET, 0, NULL);
286 }
287
288 static void bredr_init(struct hci_request *req)
289 {
290 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
291
292 /* Read Local Supported Features */
293 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
294
295 /* Read Local Version */
296 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
297
298 /* Read BD Address */
299 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
300 }
301
302 static void amp_init(struct hci_request *req)
303 {
304 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
305
306 /* Read Local Version */
307 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
308
309 /* Read Local AMP Info */
310 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
311
312 /* Read Data Blk size */
313 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
314 }
315
316 static void hci_init1_req(struct hci_request *req, unsigned long opt)
317 {
318 struct hci_dev *hdev = req->hdev;
319
320 BT_DBG("%s %ld", hdev->name, opt);
321
322 /* Reset */
323 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
324 hci_reset_req(req, 0);
325
326 switch (hdev->dev_type) {
327 case HCI_BREDR:
328 bredr_init(req);
329 break;
330
331 case HCI_AMP:
332 amp_init(req);
333 break;
334
335 default:
336 BT_ERR("Unknown device type %d", hdev->dev_type);
337 break;
338 }
339 }
340
341 static void bredr_setup(struct hci_request *req)
342 {
343 struct hci_cp_delete_stored_link_key cp;
344 __le16 param;
345 __u8 flt_type;
346
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
349
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
352
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
355
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
358
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
362
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
366
367 bacpy(&cp.bdaddr, BDADDR_ANY);
368 cp.delete_all = 0x01;
369 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
370
371 /* Read page scan parameters */
372 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
373 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
375 }
376 }
377
378 static void le_setup(struct hci_request *req)
379 {
380 /* Read LE Buffer Size */
381 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
382
383 /* Read LE Local Supported Features */
384 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
385
386 /* Read LE Advertising Channel TX Power */
387 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
388
389 /* Read LE White List Size */
390 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
391
392 /* Read LE Supported States */
393 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
394 }
395
396 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
397 {
398 if (lmp_ext_inq_capable(hdev))
399 return 0x02;
400
401 if (lmp_inq_rssi_capable(hdev))
402 return 0x01;
403
404 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
405 hdev->lmp_subver == 0x0757)
406 return 0x01;
407
408 if (hdev->manufacturer == 15) {
409 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
410 return 0x01;
411 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
414 return 0x01;
415 }
416
417 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
418 hdev->lmp_subver == 0x1805)
419 return 0x01;
420
421 return 0x00;
422 }
423
424 static void hci_setup_inquiry_mode(struct hci_request *req)
425 {
426 u8 mode;
427
428 mode = hci_get_inquiry_mode(req->hdev);
429
430 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
431 }
432
433 static void hci_setup_event_mask(struct hci_request *req)
434 {
435 struct hci_dev *hdev = req->hdev;
436
437 /* The second byte is 0xff instead of 0x9f (two reserved bits
438 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
439 * command otherwise.
440 */
441 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
442
443 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
444 * any event mask for pre-1.2 devices.
445 */
446 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
447 return;
448
449 if (lmp_bredr_capable(hdev)) {
450 events[4] |= 0x01; /* Flow Specification Complete */
451 events[4] |= 0x02; /* Inquiry Result with RSSI */
452 events[4] |= 0x04; /* Read Remote Extended Features Complete */
453 events[5] |= 0x08; /* Synchronous Connection Complete */
454 events[5] |= 0x10; /* Synchronous Connection Changed */
455 }
456
457 if (lmp_inq_rssi_capable(hdev))
458 events[4] |= 0x02; /* Inquiry Result with RSSI */
459
460 if (lmp_sniffsubr_capable(hdev))
461 events[5] |= 0x20; /* Sniff Subrating */
462
463 if (lmp_pause_enc_capable(hdev))
464 events[5] |= 0x80; /* Encryption Key Refresh Complete */
465
466 if (lmp_ext_inq_capable(hdev))
467 events[5] |= 0x40; /* Extended Inquiry Result */
468
469 if (lmp_no_flush_capable(hdev))
470 events[7] |= 0x01; /* Enhanced Flush Complete */
471
472 if (lmp_lsto_capable(hdev))
473 events[6] |= 0x80; /* Link Supervision Timeout Changed */
474
475 if (lmp_ssp_capable(hdev)) {
476 events[6] |= 0x01; /* IO Capability Request */
477 events[6] |= 0x02; /* IO Capability Response */
478 events[6] |= 0x04; /* User Confirmation Request */
479 events[6] |= 0x08; /* User Passkey Request */
480 events[6] |= 0x10; /* Remote OOB Data Request */
481 events[6] |= 0x20; /* Simple Pairing Complete */
482 events[7] |= 0x04; /* User Passkey Notification */
483 events[7] |= 0x08; /* Keypress Notification */
484 events[7] |= 0x10; /* Remote Host Supported
485 * Features Notification
486 */
487 }
488
489 if (lmp_le_capable(hdev))
490 events[7] |= 0x20; /* LE Meta-Event */
491
492 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
493
494 if (lmp_le_capable(hdev)) {
495 memset(events, 0, sizeof(events));
496 events[0] = 0x1f;
497 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
498 sizeof(events), events);
499 }
500 }
501
502 static void hci_init2_req(struct hci_request *req, unsigned long opt)
503 {
504 struct hci_dev *hdev = req->hdev;
505
506 if (lmp_bredr_capable(hdev))
507 bredr_setup(req);
508
509 if (lmp_le_capable(hdev))
510 le_setup(req);
511
512 hci_setup_event_mask(req);
513
514 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
515 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
516
517 if (lmp_ssp_capable(hdev)) {
518 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
519 u8 mode = 0x01;
520 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
521 sizeof(mode), &mode);
522 } else {
523 struct hci_cp_write_eir cp;
524
525 memset(hdev->eir, 0, sizeof(hdev->eir));
526 memset(&cp, 0, sizeof(cp));
527
528 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
529 }
530 }
531
532 if (lmp_inq_rssi_capable(hdev))
533 hci_setup_inquiry_mode(req);
534
535 if (lmp_inq_tx_pwr_capable(hdev))
536 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
537
538 if (lmp_ext_feat_capable(hdev)) {
539 struct hci_cp_read_local_ext_features cp;
540
541 cp.page = 0x01;
542 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
543 sizeof(cp), &cp);
544 }
545
546 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
547 u8 enable = 1;
548 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
549 &enable);
550 }
551 }
552
553 static void hci_setup_link_policy(struct hci_request *req)
554 {
555 struct hci_dev *hdev = req->hdev;
556 struct hci_cp_write_def_link_policy cp;
557 u16 link_policy = 0;
558
559 if (lmp_rswitch_capable(hdev))
560 link_policy |= HCI_LP_RSWITCH;
561 if (lmp_hold_capable(hdev))
562 link_policy |= HCI_LP_HOLD;
563 if (lmp_sniff_capable(hdev))
564 link_policy |= HCI_LP_SNIFF;
565 if (lmp_park_capable(hdev))
566 link_policy |= HCI_LP_PARK;
567
568 cp.policy = cpu_to_le16(link_policy);
569 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
570 }
571
572 static void hci_set_le_support(struct hci_request *req)
573 {
574 struct hci_dev *hdev = req->hdev;
575 struct hci_cp_write_le_host_supported cp;
576
577 memset(&cp, 0, sizeof(cp));
578
579 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
580 cp.le = 0x01;
581 cp.simul = lmp_le_br_capable(hdev);
582 }
583
584 if (cp.le != lmp_host_le_capable(hdev))
585 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
586 &cp);
587 }
588
589 static void hci_init3_req(struct hci_request *req, unsigned long opt)
590 {
591 struct hci_dev *hdev = req->hdev;
592 u8 p;
593
594 if (hdev->commands[5] & 0x10)
595 hci_setup_link_policy(req);
596
597 if (lmp_le_capable(hdev)) {
598 hci_set_le_support(req);
599 hci_update_ad(req);
600 }
601
602 /* Read features beyond page 1 if available */
603 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
604 struct hci_cp_read_local_ext_features cp;
605
606 cp.page = p;
607 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
608 sizeof(cp), &cp);
609 }
610 }
611
612 static int __hci_init(struct hci_dev *hdev)
613 {
614 int err;
615
616 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
617 if (err < 0)
618 return err;
619
620 /* The HCI_BREDR type covers single-mode LE, single-mode BR/EDR and
621 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
622 * first stage init.
623 */
624 if (hdev->dev_type != HCI_BREDR)
625 return 0;
626
627 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
628 if (err < 0)
629 return err;
630
631 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
632 }
633
634 static void hci_scan_req(struct hci_request *req, unsigned long opt)
635 {
636 __u8 scan = opt;
637
638 BT_DBG("%s %x", req->hdev->name, scan);
639
640 /* Inquiry and Page scans */
641 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
642 }
643
644 static void hci_auth_req(struct hci_request *req, unsigned long opt)
645 {
646 __u8 auth = opt;
647
648 BT_DBG("%s %x", req->hdev->name, auth);
649
650 /* Authentication */
651 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
652 }
653
654 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
655 {
656 __u8 encrypt = opt;
657
658 BT_DBG("%s %x", req->hdev->name, encrypt);
659
660 /* Encryption */
661 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
662 }
663
664 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
665 {
666 __le16 policy = cpu_to_le16(opt);
667
668 BT_DBG("%s %x", req->hdev->name, policy);
669
670 /* Default link policy */
671 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
672 }
673
674 /* Get HCI device by index.
675 * Device is held on return. */
676 struct hci_dev *hci_dev_get(int index)
677 {
678 struct hci_dev *hdev = NULL, *d;
679
680 BT_DBG("%d", index);
681
682 if (index < 0)
683 return NULL;
684
685 read_lock(&hci_dev_list_lock);
686 list_for_each_entry(d, &hci_dev_list, list) {
687 if (d->id == index) {
688 hdev = hci_dev_hold(d);
689 break;
690 }
691 }
692 read_unlock(&hci_dev_list_lock);
693 return hdev;
694 }
695
696 /* ---- Inquiry support ---- */
697
698 bool hci_discovery_active(struct hci_dev *hdev)
699 {
700 struct discovery_state *discov = &hdev->discovery;
701
702 switch (discov->state) {
703 case DISCOVERY_FINDING:
704 case DISCOVERY_RESOLVING:
705 return true;
706
707 default:
708 return false;
709 }
710 }
711
712 void hci_discovery_set_state(struct hci_dev *hdev, int state)
713 {
714 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
715
716 if (hdev->discovery.state == state)
717 return;
718
719 switch (state) {
720 case DISCOVERY_STOPPED:
721 if (hdev->discovery.state != DISCOVERY_STARTING)
722 mgmt_discovering(hdev, 0);
723 break;
724 case DISCOVERY_STARTING:
725 break;
726 case DISCOVERY_FINDING:
727 mgmt_discovering(hdev, 1);
728 break;
729 case DISCOVERY_RESOLVING:
730 break;
731 case DISCOVERY_STOPPING:
732 break;
733 }
734
735 hdev->discovery.state = state;
736 }
737
738 static void inquiry_cache_flush(struct hci_dev *hdev)
739 {
740 struct discovery_state *cache = &hdev->discovery;
741 struct inquiry_entry *p, *n;
742
743 list_for_each_entry_safe(p, n, &cache->all, all) {
744 list_del(&p->all);
745 kfree(p);
746 }
747
748 INIT_LIST_HEAD(&cache->unknown);
749 INIT_LIST_HEAD(&cache->resolve);
750 }
751
752 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
753 bdaddr_t *bdaddr)
754 {
755 struct discovery_state *cache = &hdev->discovery;
756 struct inquiry_entry *e;
757
758 BT_DBG("cache %p, %pMR", cache, bdaddr);
759
760 list_for_each_entry(e, &cache->all, all) {
761 if (!bacmp(&e->data.bdaddr, bdaddr))
762 return e;
763 }
764
765 return NULL;
766 }
767
768 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
769 bdaddr_t *bdaddr)
770 {
771 struct discovery_state *cache = &hdev->discovery;
772 struct inquiry_entry *e;
773
774 BT_DBG("cache %p, %pMR", cache, bdaddr);
775
776 list_for_each_entry(e, &cache->unknown, list) {
777 if (!bacmp(&e->data.bdaddr, bdaddr))
778 return e;
779 }
780
781 return NULL;
782 }
783
784 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
785 bdaddr_t *bdaddr,
786 int state)
787 {
788 struct discovery_state *cache = &hdev->discovery;
789 struct inquiry_entry *e;
790
791 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
792
793 list_for_each_entry(e, &cache->resolve, list) {
794 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
795 return e;
796 if (!bacmp(&e->data.bdaddr, bdaddr))
797 return e;
798 }
799
800 return NULL;
801 }
802
803 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
804 struct inquiry_entry *ie)
805 {
806 struct discovery_state *cache = &hdev->discovery;
807 struct list_head *pos = &cache->resolve;
808 struct inquiry_entry *p;
809
810 list_del(&ie->list);
811
812 list_for_each_entry(p, &cache->resolve, list) {
813 if (p->name_state != NAME_PENDING &&
814 abs(p->data.rssi) >= abs(ie->data.rssi))
815 break;
816 pos = &p->list;
817 }
818
819 list_add(&ie->list, pos);
820 }
821
822 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
823 bool name_known, bool *ssp)
824 {
825 struct discovery_state *cache = &hdev->discovery;
826 struct inquiry_entry *ie;
827
828 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
829
830 hci_remove_remote_oob_data(hdev, &data->bdaddr);
831
832 if (ssp)
833 *ssp = data->ssp_mode;
834
835 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
836 if (ie) {
837 if (ie->data.ssp_mode && ssp)
838 *ssp = true;
839
840 if (ie->name_state == NAME_NEEDED &&
841 data->rssi != ie->data.rssi) {
842 ie->data.rssi = data->rssi;
843 hci_inquiry_cache_update_resolve(hdev, ie);
844 }
845
846 goto update;
847 }
848
849 /* Entry not in the cache. Add new one. */
850 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
851 if (!ie)
852 return false;
853
854 list_add(&ie->all, &cache->all);
855
856 if (name_known) {
857 ie->name_state = NAME_KNOWN;
858 } else {
859 ie->name_state = NAME_NOT_KNOWN;
860 list_add(&ie->list, &cache->unknown);
861 }
862
863 update:
864 if (name_known && ie->name_state != NAME_KNOWN &&
865 ie->name_state != NAME_PENDING) {
866 ie->name_state = NAME_KNOWN;
867 list_del(&ie->list);
868 }
869
870 memcpy(&ie->data, data, sizeof(*data));
871 ie->timestamp = jiffies;
872 cache->timestamp = jiffies;
873
874 if (ie->name_state == NAME_NOT_KNOWN)
875 return false;
876
877 return true;
878 }
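
/*
 * A sketch of how an inquiry result is folded into the cache, following
 * the pattern used by the event handlers (info is a hypothetical
 * struct inquiry_info * taken from the event, hci_dev_lock() is held):
 */
static bool example_cache_inquiry_result(struct hci_dev *hdev,
					 struct inquiry_info *info)
{
	struct inquiry_data data;
	bool ssp;

	bacpy(&data.bdaddr, &info->bdaddr);
	data.pscan_rep_mode	= info->pscan_rep_mode;
	data.pscan_period_mode	= info->pscan_period_mode;
	data.pscan_mode		= info->pscan_mode;
	memcpy(data.dev_class, info->dev_class, 3);
	data.clock_offset	= info->clock_offset;
	data.rssi		= 0x00;
	data.ssp_mode		= 0x00;

	return hci_inquiry_cache_update(hdev, &data, false, &ssp);
}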
879
880 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
881 {
882 struct discovery_state *cache = &hdev->discovery;
883 struct inquiry_info *info = (struct inquiry_info *) buf;
884 struct inquiry_entry *e;
885 int copied = 0;
886
887 list_for_each_entry(e, &cache->all, all) {
888 struct inquiry_data *data = &e->data;
889
890 if (copied >= num)
891 break;
892
893 bacpy(&info->bdaddr, &data->bdaddr);
894 info->pscan_rep_mode = data->pscan_rep_mode;
895 info->pscan_period_mode = data->pscan_period_mode;
896 info->pscan_mode = data->pscan_mode;
897 memcpy(info->dev_class, data->dev_class, 3);
898 info->clock_offset = data->clock_offset;
899
900 info++;
901 copied++;
902 }
903
904 BT_DBG("cache %p, copied %d", cache, copied);
905 return copied;
906 }
907
908 static void hci_inq_req(struct hci_request *req, unsigned long opt)
909 {
910 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
911 struct hci_dev *hdev = req->hdev;
912 struct hci_cp_inquiry cp;
913
914 BT_DBG("%s", hdev->name);
915
916 if (test_bit(HCI_INQUIRY, &hdev->flags))
917 return;
918
919 /* Start Inquiry */
920 memcpy(&cp.lap, &ir->lap, 3);
921 cp.length = ir->length;
922 cp.num_rsp = ir->num_rsp;
923 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
924 }
925
926 static int wait_inquiry(void *word)
927 {
928 schedule();
929 return signal_pending(current);
930 }
931
932 int hci_inquiry(void __user *arg)
933 {
934 __u8 __user *ptr = arg;
935 struct hci_inquiry_req ir;
936 struct hci_dev *hdev;
937 int err = 0, do_inquiry = 0, max_rsp;
938 long timeo;
939 __u8 *buf;
940
941 if (copy_from_user(&ir, ptr, sizeof(ir)))
942 return -EFAULT;
943
944 hdev = hci_dev_get(ir.dev_id);
945 if (!hdev)
946 return -ENODEV;
947
948 hci_dev_lock(hdev);
949 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
950 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
951 inquiry_cache_flush(hdev);
952 do_inquiry = 1;
953 }
954 hci_dev_unlock(hdev);
955
956 timeo = ir.length * msecs_to_jiffies(2000);
957
958 if (do_inquiry) {
959 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
960 timeo);
961 if (err < 0)
962 goto done;
963
964 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
965 * cleared). If it is interrupted by a signal, return -EINTR.
966 */
967 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
968 TASK_INTERRUPTIBLE))
969 return -EINTR;
970 }
971
972 /* For an unlimited number of responses (num_rsp == 0), use a buffer
973 * with 255 entries.
974 */
975 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
976
977 /* cache_dump can't sleep, so allocate a temporary buffer and then
978 * copy it to user space.
979 */
980 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
981 if (!buf) {
982 err = -ENOMEM;
983 goto done;
984 }
985
986 hci_dev_lock(hdev);
987 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
988 hci_dev_unlock(hdev);
989
990 BT_DBG("num_rsp %d", ir.num_rsp);
991
992 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
993 ptr += sizeof(ir);
994 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
995 ir.num_rsp))
996 err = -EFAULT;
997 } else
998 err = -EFAULT;
999
1000 kfree(buf);
1001
1002 done:
1003 hci_dev_put(hdev);
1004 return err;
1005 }
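
/*
 * The handler above services the HCIINQUIRY ioctl. A userspace sketch of
 * driving it over a raw HCI socket (dd); values such as the 8 * 1.28 s
 * inquiry length and the GIAC LAP are just typical defaults:
 */
static int example_hci_inquiry(int dd, int dev_id)
{
	char buf[sizeof(struct hci_inquiry_req) +
		 255 * sizeof(struct inquiry_info)];
	struct hci_inquiry_req *ir = (void *) buf;

	memset(buf, 0, sizeof(buf));
	ir->dev_id  = dev_id;
	ir->flags   = IREQ_CACHE_FLUSH;
	ir->lap[0]  = 0x33;		/* GIAC 0x9e8b33 */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;		/* x 1.28 s */
	ir->num_rsp = 0;		/* unlimited, capped at 255 above */

	return ioctl(dd, HCIINQUIRY, (unsigned long) buf);
}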
1006
1007 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1008 {
1009 u8 ad_len = 0, flags = 0;
1010 size_t name_len;
1011
1012 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1013 flags |= LE_AD_GENERAL;
1014
1015 if (!lmp_bredr_capable(hdev))
1016 flags |= LE_AD_NO_BREDR;
1017
1018 if (lmp_le_br_capable(hdev))
1019 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1020
1021 if (lmp_host_le_br_capable(hdev))
1022 flags |= LE_AD_SIM_LE_BREDR_HOST;
1023
1024 if (flags) {
1025 BT_DBG("adv flags 0x%02x", flags);
1026
1027 ptr[0] = 2;
1028 ptr[1] = EIR_FLAGS;
1029 ptr[2] = flags;
1030
1031 ad_len += 3;
1032 ptr += 3;
1033 }
1034
1035 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1036 ptr[0] = 2;
1037 ptr[1] = EIR_TX_POWER;
1038 ptr[2] = (u8) hdev->adv_tx_power;
1039
1040 ad_len += 3;
1041 ptr += 3;
1042 }
1043
1044 name_len = strlen(hdev->dev_name);
1045 if (name_len > 0) {
1046 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1047
1048 if (name_len > max_len) {
1049 name_len = max_len;
1050 ptr[1] = EIR_NAME_SHORT;
1051 } else
1052 ptr[1] = EIR_NAME_COMPLETE;
1053
1054 ptr[0] = name_len + 1;
1055
1056 memcpy(ptr + 2, hdev->dev_name, name_len);
1057
1058 ad_len += (name_len + 2);
1059 ptr += (name_len + 2);
1060 }
1061
1062 return ad_len;
1063 }
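
/*
 * The advertising data assembled above is a sequence of length/type/value
 * elements. For illustration, the flags element for an LE-only controller
 * in general discoverable mode would be laid out as:
 */
static const u8 example_flags_ad[3] = {
	0x02,				/* element length (type + value) */
	EIR_FLAGS,			/* AD type 0x01 */
	LE_AD_GENERAL | LE_AD_NO_BREDR,	/* flags value */
};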
1064
1065 void hci_update_ad(struct hci_request *req)
1066 {
1067 struct hci_dev *hdev = req->hdev;
1068 struct hci_cp_le_set_adv_data cp;
1069 u8 len;
1070
1071 if (!lmp_le_capable(hdev))
1072 return;
1073
1074 memset(&cp, 0, sizeof(cp));
1075
1076 len = create_ad(hdev, cp.data);
1077
1078 if (hdev->adv_data_len == len &&
1079 memcmp(cp.data, hdev->adv_data, len) == 0)
1080 return;
1081
1082 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1083 hdev->adv_data_len = len;
1084
1085 cp.length = len;
1086
1087 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1088 }
1089
1090 /* ---- HCI ioctl helpers ---- */
1091
1092 int hci_dev_open(__u16 dev)
1093 {
1094 struct hci_dev *hdev;
1095 int ret = 0;
1096
1097 hdev = hci_dev_get(dev);
1098 if (!hdev)
1099 return -ENODEV;
1100
1101 BT_DBG("%s %p", hdev->name, hdev);
1102
1103 hci_req_lock(hdev);
1104
1105 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1106 ret = -ENODEV;
1107 goto done;
1108 }
1109
1110 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1111 ret = -ERFKILL;
1112 goto done;
1113 }
1114
1115 if (test_bit(HCI_UP, &hdev->flags)) {
1116 ret = -EALREADY;
1117 goto done;
1118 }
1119
1120 if (hdev->open(hdev)) {
1121 ret = -EIO;
1122 goto done;
1123 }
1124
1125 atomic_set(&hdev->cmd_cnt, 1);
1126 set_bit(HCI_INIT, &hdev->flags);
1127
1128 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1129 ret = hdev->setup(hdev);
1130
1131 if (!ret) {
1132 /* Treat all non-BR/EDR controllers as raw devices if
1133 * enable_hs is not set.
1134 */
1135 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1136 set_bit(HCI_RAW, &hdev->flags);
1137
1138 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1139 set_bit(HCI_RAW, &hdev->flags);
1140
1141 if (!test_bit(HCI_RAW, &hdev->flags))
1142 ret = __hci_init(hdev);
1143 }
1144
1145 clear_bit(HCI_INIT, &hdev->flags);
1146
1147 if (!ret) {
1148 hci_dev_hold(hdev);
1149 set_bit(HCI_UP, &hdev->flags);
1150 hci_notify(hdev, HCI_DEV_UP);
1151 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1152 mgmt_valid_hdev(hdev)) {
1153 hci_dev_lock(hdev);
1154 mgmt_powered(hdev, 1);
1155 hci_dev_unlock(hdev);
1156 }
1157 } else {
1158 /* Init failed, cleanup */
1159 flush_work(&hdev->tx_work);
1160 flush_work(&hdev->cmd_work);
1161 flush_work(&hdev->rx_work);
1162
1163 skb_queue_purge(&hdev->cmd_q);
1164 skb_queue_purge(&hdev->rx_q);
1165
1166 if (hdev->flush)
1167 hdev->flush(hdev);
1168
1169 if (hdev->sent_cmd) {
1170 kfree_skb(hdev->sent_cmd);
1171 hdev->sent_cmd = NULL;
1172 }
1173
1174 hdev->close(hdev);
1175 hdev->flags = 0;
1176 }
1177
1178 done:
1179 hci_req_unlock(hdev);
1180 hci_dev_put(hdev);
1181 return ret;
1182 }
1183
1184 static int hci_dev_do_close(struct hci_dev *hdev)
1185 {
1186 BT_DBG("%s %p", hdev->name, hdev);
1187
1188 cancel_work_sync(&hdev->le_scan);
1189
1190 cancel_delayed_work(&hdev->power_off);
1191
1192 hci_req_cancel(hdev, ENODEV);
1193 hci_req_lock(hdev);
1194
1195 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1196 del_timer_sync(&hdev->cmd_timer);
1197 hci_req_unlock(hdev);
1198 return 0;
1199 }
1200
1201 /* Flush RX and TX works */
1202 flush_work(&hdev->tx_work);
1203 flush_work(&hdev->rx_work);
1204
1205 if (hdev->discov_timeout > 0) {
1206 cancel_delayed_work(&hdev->discov_off);
1207 hdev->discov_timeout = 0;
1208 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1209 }
1210
1211 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1212 cancel_delayed_work(&hdev->service_cache);
1213
1214 cancel_delayed_work_sync(&hdev->le_scan_disable);
1215
1216 hci_dev_lock(hdev);
1217 inquiry_cache_flush(hdev);
1218 hci_conn_hash_flush(hdev);
1219 hci_dev_unlock(hdev);
1220
1221 hci_notify(hdev, HCI_DEV_DOWN);
1222
1223 if (hdev->flush)
1224 hdev->flush(hdev);
1225
1226 /* Reset device */
1227 skb_queue_purge(&hdev->cmd_q);
1228 atomic_set(&hdev->cmd_cnt, 1);
1229 if (!test_bit(HCI_RAW, &hdev->flags) &&
1230 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1231 set_bit(HCI_INIT, &hdev->flags);
1232 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1233 clear_bit(HCI_INIT, &hdev->flags);
1234 }
1235
1236 /* flush cmd work */
1237 flush_work(&hdev->cmd_work);
1238
1239 /* Drop queues */
1240 skb_queue_purge(&hdev->rx_q);
1241 skb_queue_purge(&hdev->cmd_q);
1242 skb_queue_purge(&hdev->raw_q);
1243
1244 /* Drop last sent command */
1245 if (hdev->sent_cmd) {
1246 del_timer_sync(&hdev->cmd_timer);
1247 kfree_skb(hdev->sent_cmd);
1248 hdev->sent_cmd = NULL;
1249 }
1250
1251 kfree_skb(hdev->recv_evt);
1252 hdev->recv_evt = NULL;
1253
1254 /* After this point our queues are empty
1255 * and no tasks are scheduled. */
1256 hdev->close(hdev);
1257
1258 /* Clear flags */
1259 hdev->flags = 0;
1260 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1261
1262 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1263 mgmt_valid_hdev(hdev)) {
1264 hci_dev_lock(hdev);
1265 mgmt_powered(hdev, 0);
1266 hci_dev_unlock(hdev);
1267 }
1268
1269 /* Controller radio is available but is currently powered down */
1270 hdev->amp_status = 0;
1271
1272 memset(hdev->eir, 0, sizeof(hdev->eir));
1273 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1274
1275 hci_req_unlock(hdev);
1276
1277 hci_dev_put(hdev);
1278 return 0;
1279 }
1280
1281 int hci_dev_close(__u16 dev)
1282 {
1283 struct hci_dev *hdev;
1284 int err;
1285
1286 hdev = hci_dev_get(dev);
1287 if (!hdev)
1288 return -ENODEV;
1289
1290 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1291 cancel_delayed_work(&hdev->power_off);
1292
1293 err = hci_dev_do_close(hdev);
1294
1295 hci_dev_put(hdev);
1296 return err;
1297 }
1298
1299 int hci_dev_reset(__u16 dev)
1300 {
1301 struct hci_dev *hdev;
1302 int ret = 0;
1303
1304 hdev = hci_dev_get(dev);
1305 if (!hdev)
1306 return -ENODEV;
1307
1308 hci_req_lock(hdev);
1309
1310 if (!test_bit(HCI_UP, &hdev->flags))
1311 goto done;
1312
1313 /* Drop queues */
1314 skb_queue_purge(&hdev->rx_q);
1315 skb_queue_purge(&hdev->cmd_q);
1316
1317 hci_dev_lock(hdev);
1318 inquiry_cache_flush(hdev);
1319 hci_conn_hash_flush(hdev);
1320 hci_dev_unlock(hdev);
1321
1322 if (hdev->flush)
1323 hdev->flush(hdev);
1324
1325 atomic_set(&hdev->cmd_cnt, 1);
1326 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1327
1328 if (!test_bit(HCI_RAW, &hdev->flags))
1329 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1330
1331 done:
1332 hci_req_unlock(hdev);
1333 hci_dev_put(hdev);
1334 return ret;
1335 }
1336
1337 int hci_dev_reset_stat(__u16 dev)
1338 {
1339 struct hci_dev *hdev;
1340 int ret = 0;
1341
1342 hdev = hci_dev_get(dev);
1343 if (!hdev)
1344 return -ENODEV;
1345
1346 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1347
1348 hci_dev_put(hdev);
1349
1350 return ret;
1351 }
1352
1353 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1354 {
1355 struct hci_dev *hdev;
1356 struct hci_dev_req dr;
1357 int err = 0;
1358
1359 if (copy_from_user(&dr, arg, sizeof(dr)))
1360 return -EFAULT;
1361
1362 hdev = hci_dev_get(dr.dev_id);
1363 if (!hdev)
1364 return -ENODEV;
1365
1366 switch (cmd) {
1367 case HCISETAUTH:
1368 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1369 HCI_INIT_TIMEOUT);
1370 break;
1371
1372 case HCISETENCRYPT:
1373 if (!lmp_encrypt_capable(hdev)) {
1374 err = -EOPNOTSUPP;
1375 break;
1376 }
1377
1378 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1379 /* Auth must be enabled first */
1380 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1381 HCI_INIT_TIMEOUT);
1382 if (err)
1383 break;
1384 }
1385
1386 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1387 HCI_INIT_TIMEOUT);
1388 break;
1389
1390 case HCISETSCAN:
1391 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1392 HCI_INIT_TIMEOUT);
1393 break;
1394
1395 case HCISETLINKPOL:
1396 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1397 HCI_INIT_TIMEOUT);
1398 break;
1399
1400 case HCISETLINKMODE:
1401 hdev->link_mode = ((__u16) dr.dev_opt) &
1402 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1403 break;
1404
1405 case HCISETPTYPE:
1406 hdev->pkt_type = (__u16) dr.dev_opt;
1407 break;
1408
1409 case HCISETACLMTU:
1410 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1411 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1412 break;
1413
1414 case HCISETSCOMTU:
1415 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1416 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1417 break;
1418
1419 default:
1420 err = -EINVAL;
1421 break;
1422 }
1423
1424 hci_dev_put(hdev);
1425 return err;
1426 }
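
/*
 * A userspace sketch for one of the simple ioctls above: enable page and
 * inquiry scan on a given device through a raw HCI socket (dd).
 */
static int example_enable_scan(int dd, int dev_id)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;

	return ioctl(dd, HCISETSCAN, (unsigned long) &dr);
}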
1427
1428 int hci_get_dev_list(void __user *arg)
1429 {
1430 struct hci_dev *hdev;
1431 struct hci_dev_list_req *dl;
1432 struct hci_dev_req *dr;
1433 int n = 0, size, err;
1434 __u16 dev_num;
1435
1436 if (get_user(dev_num, (__u16 __user *) arg))
1437 return -EFAULT;
1438
1439 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1440 return -EINVAL;
1441
1442 size = sizeof(*dl) + dev_num * sizeof(*dr);
1443
1444 dl = kzalloc(size, GFP_KERNEL);
1445 if (!dl)
1446 return -ENOMEM;
1447
1448 dr = dl->dev_req;
1449
1450 read_lock(&hci_dev_list_lock);
1451 list_for_each_entry(hdev, &hci_dev_list, list) {
1452 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1453 cancel_delayed_work(&hdev->power_off);
1454
1455 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1456 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1457
1458 (dr + n)->dev_id = hdev->id;
1459 (dr + n)->dev_opt = hdev->flags;
1460
1461 if (++n >= dev_num)
1462 break;
1463 }
1464 read_unlock(&hci_dev_list_lock);
1465
1466 dl->dev_num = n;
1467 size = sizeof(*dl) + n * sizeof(*dr);
1468
1469 err = copy_to_user(arg, dl, size);
1470 kfree(dl);
1471
1472 return err ? -EFAULT : 0;
1473 }
1474
1475 int hci_get_dev_info(void __user *arg)
1476 {
1477 struct hci_dev *hdev;
1478 struct hci_dev_info di;
1479 int err = 0;
1480
1481 if (copy_from_user(&di, arg, sizeof(di)))
1482 return -EFAULT;
1483
1484 hdev = hci_dev_get(di.dev_id);
1485 if (!hdev)
1486 return -ENODEV;
1487
1488 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1489 cancel_delayed_work_sync(&hdev->power_off);
1490
1491 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1492 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1493
1494 strcpy(di.name, hdev->name);
1495 di.bdaddr = hdev->bdaddr;
1496 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1497 di.flags = hdev->flags;
1498 di.pkt_type = hdev->pkt_type;
1499 if (lmp_bredr_capable(hdev)) {
1500 di.acl_mtu = hdev->acl_mtu;
1501 di.acl_pkts = hdev->acl_pkts;
1502 di.sco_mtu = hdev->sco_mtu;
1503 di.sco_pkts = hdev->sco_pkts;
1504 } else {
1505 di.acl_mtu = hdev->le_mtu;
1506 di.acl_pkts = hdev->le_pkts;
1507 di.sco_mtu = 0;
1508 di.sco_pkts = 0;
1509 }
1510 di.link_policy = hdev->link_policy;
1511 di.link_mode = hdev->link_mode;
1512
1513 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1514 memcpy(&di.features, &hdev->features, sizeof(di.features));
1515
1516 if (copy_to_user(arg, &di, sizeof(di)))
1517 err = -EFAULT;
1518
1519 hci_dev_put(hdev);
1520
1521 return err;
1522 }
1523
1524 /* ---- Interface to HCI drivers ---- */
1525
1526 static int hci_rfkill_set_block(void *data, bool blocked)
1527 {
1528 struct hci_dev *hdev = data;
1529
1530 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1531
1532 if (!blocked)
1533 return 0;
1534
1535 hci_dev_do_close(hdev);
1536
1537 return 0;
1538 }
1539
1540 static const struct rfkill_ops hci_rfkill_ops = {
1541 .set_block = hci_rfkill_set_block,
1542 };
1543
1544 static void hci_power_on(struct work_struct *work)
1545 {
1546 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1547
1548 BT_DBG("%s", hdev->name);
1549
1550 if (hci_dev_open(hdev->id) < 0)
1551 return;
1552
1553 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1554 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1555 HCI_AUTO_OFF_TIMEOUT);
1556
1557 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1558 mgmt_index_added(hdev);
1559 }
1560
1561 static void hci_power_off(struct work_struct *work)
1562 {
1563 struct hci_dev *hdev = container_of(work, struct hci_dev,
1564 power_off.work);
1565
1566 BT_DBG("%s", hdev->name);
1567
1568 hci_dev_do_close(hdev);
1569 }
1570
1571 static void hci_discov_off(struct work_struct *work)
1572 {
1573 struct hci_dev *hdev;
1574 u8 scan = SCAN_PAGE;
1575
1576 hdev = container_of(work, struct hci_dev, discov_off.work);
1577
1578 BT_DBG("%s", hdev->name);
1579
1580 hci_dev_lock(hdev);
1581
1582 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1583
1584 hdev->discov_timeout = 0;
1585
1586 hci_dev_unlock(hdev);
1587 }
1588
1589 int hci_uuids_clear(struct hci_dev *hdev)
1590 {
1591 struct bt_uuid *uuid, *tmp;
1592
1593 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1594 list_del(&uuid->list);
1595 kfree(uuid);
1596 }
1597
1598 return 0;
1599 }
1600
1601 int hci_link_keys_clear(struct hci_dev *hdev)
1602 {
1603 struct list_head *p, *n;
1604
1605 list_for_each_safe(p, n, &hdev->link_keys) {
1606 struct link_key *key;
1607
1608 key = list_entry(p, struct link_key, list);
1609
1610 list_del(p);
1611 kfree(key);
1612 }
1613
1614 return 0;
1615 }
1616
1617 int hci_smp_ltks_clear(struct hci_dev *hdev)
1618 {
1619 struct smp_ltk *k, *tmp;
1620
1621 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1622 list_del(&k->list);
1623 kfree(k);
1624 }
1625
1626 return 0;
1627 }
1628
1629 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1630 {
1631 struct link_key *k;
1632
1633 list_for_each_entry(k, &hdev->link_keys, list)
1634 if (bacmp(bdaddr, &k->bdaddr) == 0)
1635 return k;
1636
1637 return NULL;
1638 }
1639
1640 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1641 u8 key_type, u8 old_key_type)
1642 {
1643 /* Legacy key */
1644 if (key_type < 0x03)
1645 return true;
1646
1647 /* Debug keys are insecure so don't store them persistently */
1648 if (key_type == HCI_LK_DEBUG_COMBINATION)
1649 return false;
1650
1651 /* Changed combination key and there's no previous one */
1652 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1653 return false;
1654
1655 /* Security mode 3 case */
1656 if (!conn)
1657 return true;
1658
1659 /* Neither local nor remote side had no-bonding as requirement */
1660 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1661 return true;
1662
1663 /* Local side had dedicated bonding as requirement */
1664 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1665 return true;
1666
1667 /* Remote side had dedicated bonding as requirement */
1668 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1669 return true;
1670
1671 /* If none of the above criteria match, then don't store the key
1672 * persistently */
1673 return false;
1674 }
1675
1676 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1677 {
1678 struct smp_ltk *k;
1679
1680 list_for_each_entry(k, &hdev->long_term_keys, list) {
1681 if (k->ediv != ediv ||
1682 memcmp(rand, k->rand, sizeof(k->rand)))
1683 continue;
1684
1685 return k;
1686 }
1687
1688 return NULL;
1689 }
1690
1691 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1692 u8 addr_type)
1693 {
1694 struct smp_ltk *k;
1695
1696 list_for_each_entry(k, &hdev->long_term_keys, list)
1697 if (addr_type == k->bdaddr_type &&
1698 bacmp(bdaddr, &k->bdaddr) == 0)
1699 return k;
1700
1701 return NULL;
1702 }
1703
1704 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1705 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1706 {
1707 struct link_key *key, *old_key;
1708 u8 old_key_type;
1709 bool persistent;
1710
1711 old_key = hci_find_link_key(hdev, bdaddr);
1712 if (old_key) {
1713 old_key_type = old_key->type;
1714 key = old_key;
1715 } else {
1716 old_key_type = conn ? conn->key_type : 0xff;
1717 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1718 if (!key)
1719 return -ENOMEM;
1720 list_add(&key->list, &hdev->link_keys);
1721 }
1722
1723 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1724
1725 /* Some buggy controller combinations generate a changed
1726 * combination key for legacy pairing even when there's no
1727 * previous key */
1728 if (type == HCI_LK_CHANGED_COMBINATION &&
1729 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1730 type = HCI_LK_COMBINATION;
1731 if (conn)
1732 conn->key_type = type;
1733 }
1734
1735 bacpy(&key->bdaddr, bdaddr);
1736 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1737 key->pin_len = pin_len;
1738
1739 if (type == HCI_LK_CHANGED_COMBINATION)
1740 key->type = old_key_type;
1741 else
1742 key->type = type;
1743
1744 if (!new_key)
1745 return 0;
1746
1747 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1748
1749 mgmt_new_link_key(hdev, key, persistent);
1750
1751 if (conn)
1752 conn->flush_key = !persistent;
1753
1754 return 0;
1755 }
1756
1757 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1758 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1759 ediv, u8 rand[8])
1760 {
1761 struct smp_ltk *key, *old_key;
1762
1763 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1764 return 0;
1765
1766 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1767 if (old_key)
1768 key = old_key;
1769 else {
1770 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1771 if (!key)
1772 return -ENOMEM;
1773 list_add(&key->list, &hdev->long_term_keys);
1774 }
1775
1776 bacpy(&key->bdaddr, bdaddr);
1777 key->bdaddr_type = addr_type;
1778 memcpy(key->val, tk, sizeof(key->val));
1779 key->authenticated = authenticated;
1780 key->ediv = ediv;
1781 key->enc_size = enc_size;
1782 key->type = type;
1783 memcpy(key->rand, rand, sizeof(key->rand));
1784
1785 if (!new_key)
1786 return 0;
1787
1788 if (type & HCI_SMP_LTK)
1789 mgmt_new_ltk(hdev, key, 1);
1790
1791 return 0;
1792 }
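
/*
 * A sketch of storing a freshly distributed LTK, mirroring how the SMP
 * code calls the helper above; peer, addr_type, tk, ediv and rand are
 * placeholders for values taken from the pairing exchange.
 */
static void example_store_ltk(struct hci_dev *hdev, bdaddr_t *peer,
			      u8 addr_type, u8 tk[16], __le16 ediv,
			      u8 rand[8])
{
	hci_add_ltk(hdev, peer, addr_type, HCI_SMP_LTK, 1 /* new_key */,
		    0x00 /* unauthenticated */, tk, 16 /* enc_size */,
		    ediv, rand);
}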
1793
1794 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1795 {
1796 struct link_key *key;
1797
1798 key = hci_find_link_key(hdev, bdaddr);
1799 if (!key)
1800 return -ENOENT;
1801
1802 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1803
1804 list_del(&key->list);
1805 kfree(key);
1806
1807 return 0;
1808 }
1809
1810 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1811 {
1812 struct smp_ltk *k, *tmp;
1813
1814 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1815 if (bacmp(bdaddr, &k->bdaddr))
1816 continue;
1817
1818 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1819
1820 list_del(&k->list);
1821 kfree(k);
1822 }
1823
1824 return 0;
1825 }
1826
1827 /* HCI command timer function */
1828 static void hci_cmd_timeout(unsigned long arg)
1829 {
1830 struct hci_dev *hdev = (void *) arg;
1831
1832 if (hdev->sent_cmd) {
1833 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1834 u16 opcode = __le16_to_cpu(sent->opcode);
1835
1836 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1837 } else {
1838 BT_ERR("%s command tx timeout", hdev->name);
1839 }
1840
1841 atomic_set(&hdev->cmd_cnt, 1);
1842 queue_work(hdev->workqueue, &hdev->cmd_work);
1843 }
1844
1845 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1846 bdaddr_t *bdaddr)
1847 {
1848 struct oob_data *data;
1849
1850 list_for_each_entry(data, &hdev->remote_oob_data, list)
1851 if (bacmp(bdaddr, &data->bdaddr) == 0)
1852 return data;
1853
1854 return NULL;
1855 }
1856
1857 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1858 {
1859 struct oob_data *data;
1860
1861 data = hci_find_remote_oob_data(hdev, bdaddr);
1862 if (!data)
1863 return -ENOENT;
1864
1865 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1866
1867 list_del(&data->list);
1868 kfree(data);
1869
1870 return 0;
1871 }
1872
1873 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1874 {
1875 struct oob_data *data, *n;
1876
1877 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1878 list_del(&data->list);
1879 kfree(data);
1880 }
1881
1882 return 0;
1883 }
1884
1885 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1886 u8 *randomizer)
1887 {
1888 struct oob_data *data;
1889
1890 data = hci_find_remote_oob_data(hdev, bdaddr);
1891
1892 if (!data) {
1893 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1894 if (!data)
1895 return -ENOMEM;
1896
1897 bacpy(&data->bdaddr, bdaddr);
1898 list_add(&data->list, &hdev->remote_oob_data);
1899 }
1900
1901 memcpy(data->hash, hash, sizeof(data->hash));
1902 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1903
1904 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1905
1906 return 0;
1907 }
1908
1909 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1910 {
1911 struct bdaddr_list *b;
1912
1913 list_for_each_entry(b, &hdev->blacklist, list)
1914 if (bacmp(bdaddr, &b->bdaddr) == 0)
1915 return b;
1916
1917 return NULL;
1918 }
1919
1920 int hci_blacklist_clear(struct hci_dev *hdev)
1921 {
1922 struct list_head *p, *n;
1923
1924 list_for_each_safe(p, n, &hdev->blacklist) {
1925 struct bdaddr_list *b;
1926
1927 b = list_entry(p, struct bdaddr_list, list);
1928
1929 list_del(p);
1930 kfree(b);
1931 }
1932
1933 return 0;
1934 }
1935
1936 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1937 {
1938 struct bdaddr_list *entry;
1939
1940 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1941 return -EBADF;
1942
1943 if (hci_blacklist_lookup(hdev, bdaddr))
1944 return -EEXIST;
1945
1946 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1947 if (!entry)
1948 return -ENOMEM;
1949
1950 bacpy(&entry->bdaddr, bdaddr);
1951
1952 list_add(&entry->list, &hdev->blacklist);
1953
1954 return mgmt_device_blocked(hdev, bdaddr, type);
1955 }
1956
1957 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1958 {
1959 struct bdaddr_list *entry;
1960
1961 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1962 return hci_blacklist_clear(hdev);
1963
1964 entry = hci_blacklist_lookup(hdev, bdaddr);
1965 if (!entry)
1966 return -ENOENT;
1967
1968 list_del(&entry->list);
1969 kfree(entry);
1970
1971 return mgmt_device_unblocked(hdev, bdaddr, type);
1972 }
1973
1974 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1975 {
1976 struct le_scan_params *param = (struct le_scan_params *) opt;
1977 struct hci_cp_le_set_scan_param cp;
1978
1979 memset(&cp, 0, sizeof(cp));
1980 cp.type = param->type;
1981 cp.interval = cpu_to_le16(param->interval);
1982 cp.window = cpu_to_le16(param->window);
1983
1984 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1985 }
1986
1987 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1988 {
1989 struct hci_cp_le_set_scan_enable cp;
1990
1991 memset(&cp, 0, sizeof(cp));
1992 cp.enable = LE_SCAN_ENABLE;
1993 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1994
1995 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1996 }
1997
1998 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1999 u16 window, int timeout)
2000 {
2001 long timeo = msecs_to_jiffies(3000);
2002 struct le_scan_params param;
2003 int err;
2004
2005 BT_DBG("%s", hdev->name);
2006
2007 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2008 return -EINPROGRESS;
2009
2010 param.type = type;
2011 param.interval = interval;
2012 param.window = window;
2013
2014 hci_req_lock(hdev);
2015
2016 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2017 timeo);
2018 if (!err)
2019 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2020
2021 hci_req_unlock(hdev);
2022
2023 if (err < 0)
2024 return err;
2025
2026 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2027 timeout);
2028
2029 return 0;
2030 }
2031
2032 int hci_cancel_le_scan(struct hci_dev *hdev)
2033 {
2034 BT_DBG("%s", hdev->name);
2035
2036 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2037 return -EALREADY;
2038
2039 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2040 struct hci_cp_le_set_scan_enable cp;
2041
2042 /* Send HCI command to disable LE Scan */
2043 memset(&cp, 0, sizeof(cp));
2044 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2045 }
2046
2047 return 0;
2048 }
2049
2050 static void le_scan_disable_work(struct work_struct *work)
2051 {
2052 struct hci_dev *hdev = container_of(work, struct hci_dev,
2053 le_scan_disable.work);
2054 struct hci_cp_le_set_scan_enable cp;
2055
2056 BT_DBG("%s", hdev->name);
2057
2058 memset(&cp, 0, sizeof(cp));
2059
2060 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2061 }
2062
2063 static void le_scan_work(struct work_struct *work)
2064 {
2065 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2066 struct le_scan_params *param = &hdev->le_scan_params;
2067
2068 BT_DBG("%s", hdev->name);
2069
2070 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2071 param->timeout);
2072 }
2073
2074 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2075 int timeout)
2076 {
2077 struct le_scan_params *param = &hdev->le_scan_params;
2078
2079 BT_DBG("%s", hdev->name);
2080
2081 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2082 return -ENOTSUPP;
2083
2084 if (work_busy(&hdev->le_scan))
2085 return -EINPROGRESS;
2086
2087 param->type = type;
2088 param->interval = interval;
2089 param->window = window;
2090 param->timeout = timeout;
2091
2092 queue_work(system_long_wq, &hdev->le_scan);
2093
2094 return 0;
2095 }
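
/*
 * A sketch of starting a background LE scan with the helper above; the
 * interval and window are in 0.625 ms units and the timeout is in
 * jiffies, all values here being illustrative:
 */
static int example_start_le_scan(struct hci_dev *hdev)
{
	/* active scan, 60 ms interval, 30 ms window, stop after ~10 s */
	return hci_le_scan(hdev, 0x01, 0x0060, 0x0030,
			   msecs_to_jiffies(10240));
}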
2096
2097 /* Alloc HCI device */
2098 struct hci_dev *hci_alloc_dev(void)
2099 {
2100 struct hci_dev *hdev;
2101
2102 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2103 if (!hdev)
2104 return NULL;
2105
2106 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2107 hdev->esco_type = (ESCO_HV1);
2108 hdev->link_mode = (HCI_LM_ACCEPT);
2109 hdev->io_capability = 0x03; /* No Input No Output */
2110 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2111 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2112
2113 hdev->sniff_max_interval = 800;
2114 hdev->sniff_min_interval = 80;
2115
2116 mutex_init(&hdev->lock);
2117 mutex_init(&hdev->req_lock);
2118
2119 INIT_LIST_HEAD(&hdev->mgmt_pending);
2120 INIT_LIST_HEAD(&hdev->blacklist);
2121 INIT_LIST_HEAD(&hdev->uuids);
2122 INIT_LIST_HEAD(&hdev->link_keys);
2123 INIT_LIST_HEAD(&hdev->long_term_keys);
2124 INIT_LIST_HEAD(&hdev->remote_oob_data);
2125 INIT_LIST_HEAD(&hdev->conn_hash.list);
2126
2127 INIT_WORK(&hdev->rx_work, hci_rx_work);
2128 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2129 INIT_WORK(&hdev->tx_work, hci_tx_work);
2130 INIT_WORK(&hdev->power_on, hci_power_on);
2131 INIT_WORK(&hdev->le_scan, le_scan_work);
2132
2133 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2134 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2135 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2136
2137 skb_queue_head_init(&hdev->rx_q);
2138 skb_queue_head_init(&hdev->cmd_q);
2139 skb_queue_head_init(&hdev->raw_q);
2140
2141 init_waitqueue_head(&hdev->req_wait_q);
2142
2143 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2144
2145 hci_init_sysfs(hdev);
2146 discovery_init(hdev);
2147
2148 return hdev;
2149 }
2150 EXPORT_SYMBOL(hci_alloc_dev);
2151
2152 /* Free HCI device */
2153 void hci_free_dev(struct hci_dev *hdev)
2154 {
2155 /* will free via device release */
2156 put_device(&hdev->dev);
2157 }
2158 EXPORT_SYMBOL(hci_free_dev);
2159
2160 /* Register HCI device */
2161 int hci_register_dev(struct hci_dev *hdev)
2162 {
2163 int id, error;
2164
2165 if (!hdev->open || !hdev->close)
2166 return -EINVAL;
2167
2168 /* Do not allow HCI_AMP devices to register at index 0,
2169 * so the index can be used as the AMP controller ID.
2170 */
2171 switch (hdev->dev_type) {
2172 case HCI_BREDR:
2173 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2174 break;
2175 case HCI_AMP:
2176 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2177 break;
2178 default:
2179 return -EINVAL;
2180 }
2181
2182 if (id < 0)
2183 return id;
2184
2185 sprintf(hdev->name, "hci%d", id);
2186 hdev->id = id;
2187
2188 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2189
2190 write_lock(&hci_dev_list_lock);
2191 list_add(&hdev->list, &hci_dev_list);
2192 write_unlock(&hci_dev_list_lock);
2193
2194 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2195 WQ_MEM_RECLAIM, 1);
2196 if (!hdev->workqueue) {
2197 error = -ENOMEM;
2198 goto err;
2199 }
2200
2201 hdev->req_workqueue = alloc_workqueue(hdev->name,
2202 WQ_HIGHPRI | WQ_UNBOUND |
2203 WQ_MEM_RECLAIM, 1);
2204 if (!hdev->req_workqueue) {
2205 destroy_workqueue(hdev->workqueue);
2206 error = -ENOMEM;
2207 goto err;
2208 }
2209
2210 error = hci_add_sysfs(hdev);
2211 if (error < 0)
2212 goto err_wqueue;
2213
2214 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2215 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2216 hdev);
2217 if (hdev->rfkill) {
2218 if (rfkill_register(hdev->rfkill) < 0) {
2219 rfkill_destroy(hdev->rfkill);
2220 hdev->rfkill = NULL;
2221 }
2222 }
2223
2224 set_bit(HCI_SETUP, &hdev->dev_flags);
2225
2226 if (hdev->dev_type != HCI_AMP)
2227 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2228
2229 hci_notify(hdev, HCI_DEV_REG);
2230 hci_dev_hold(hdev);
2231
2232 queue_work(hdev->req_workqueue, &hdev->power_on);
2233
2234 return id;
2235
2236 err_wqueue:
2237 destroy_workqueue(hdev->workqueue);
2238 destroy_workqueue(hdev->req_workqueue);
2239 err:
2240 ida_simple_remove(&hci_index_ida, hdev->id);
2241 write_lock(&hci_dev_list_lock);
2242 list_del(&hdev->list);
2243 write_unlock(&hci_dev_list_lock);
2244
2245 return error;
2246 }
2247 EXPORT_SYMBOL(hci_register_dev);
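
/*
 * How a transport driver typically hooks into the core, as a minimal
 * sketch; example_open/close/flush/send stand in for the driver's real
 * callbacks and are not defined here.
 */
static int example_driver_setup(void *drv_priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->flush = example_flush;
	hdev->send  = example_send;
	hci_set_drvdata(hdev, drv_priv);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}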
2248
2249 /* Unregister HCI device */
2250 void hci_unregister_dev(struct hci_dev *hdev)
2251 {
2252 int i, id;
2253
2254 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2255
2256 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2257
2258 id = hdev->id;
2259
2260 write_lock(&hci_dev_list_lock);
2261 list_del(&hdev->list);
2262 write_unlock(&hci_dev_list_lock);
2263
2264 hci_dev_do_close(hdev);
2265
2266 for (i = 0; i < NUM_REASSEMBLY; i++)
2267 kfree_skb(hdev->reassembly[i]);
2268
2269 cancel_work_sync(&hdev->power_on);
2270
2271 if (!test_bit(HCI_INIT, &hdev->flags) &&
2272 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2273 hci_dev_lock(hdev);
2274 mgmt_index_removed(hdev);
2275 hci_dev_unlock(hdev);
2276 }
2277
2278 /* mgmt_index_removed should take care of emptying the
2279 * pending list */
2280 BUG_ON(!list_empty(&hdev->mgmt_pending));
2281
2282 hci_notify(hdev, HCI_DEV_UNREG);
2283
2284 if (hdev->rfkill) {
2285 rfkill_unregister(hdev->rfkill);
2286 rfkill_destroy(hdev->rfkill);
2287 }
2288
2289 hci_del_sysfs(hdev);
2290
2291 destroy_workqueue(hdev->workqueue);
2292 destroy_workqueue(hdev->req_workqueue);
2293
2294 hci_dev_lock(hdev);
2295 hci_blacklist_clear(hdev);
2296 hci_uuids_clear(hdev);
2297 hci_link_keys_clear(hdev);
2298 hci_smp_ltks_clear(hdev);
2299 hci_remote_oob_data_clear(hdev);
2300 hci_dev_unlock(hdev);
2301
2302 hci_dev_put(hdev);
2303
2304 ida_simple_remove(&hci_index_ida, id);
2305 }
2306 EXPORT_SYMBOL(hci_unregister_dev);
2307
2308 /* Suspend HCI device */
2309 int hci_suspend_dev(struct hci_dev *hdev)
2310 {
2311 hci_notify(hdev, HCI_DEV_SUSPEND);
2312 return 0;
2313 }
2314 EXPORT_SYMBOL(hci_suspend_dev);
2315
2316 /* Resume HCI device */
2317 int hci_resume_dev(struct hci_dev *hdev)
2318 {
2319 hci_notify(hdev, HCI_DEV_RESUME);
2320 return 0;
2321 }
2322 EXPORT_SYMBOL(hci_resume_dev);
2323
2324 /* Receive frame from HCI drivers */
2325 int hci_recv_frame(struct sk_buff *skb)
2326 {
2327 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2328 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2329 && !test_bit(HCI_INIT, &hdev->flags))) {
2330 kfree_skb(skb);
2331 return -ENXIO;
2332 }
2333
2334 /* Incoming skb */
2335 bt_cb(skb)->incoming = 1;
2336
2337 /* Time stamp */
2338 __net_timestamp(skb);
2339
2340 skb_queue_tail(&hdev->rx_q, skb);
2341 queue_work(hdev->workqueue, &hdev->rx_work);
2342
2343 return 0;
2344 }
2345 EXPORT_SYMBOL(hci_recv_frame);
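
/* Usage sketch (not part of the original file): a driver that already holds a
 * complete HCI packet hands it to the core by tagging the packet type and the
 * owning device before calling hci_recv_frame(), mirroring what
 * hci_reassembly() does below. Here hdev, buf and len are assumed to come
 * from the driver's receive path.
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *
 *	return hci_recv_frame(skb);
 */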
2346
2347 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2348 int count, __u8 index)
2349 {
2350 int len = 0;
2351 int hlen = 0;
2352 int remain = count;
2353 struct sk_buff *skb;
2354 struct bt_skb_cb *scb;
2355
2356 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2357 index >= NUM_REASSEMBLY)
2358 return -EILSEQ;
2359
2360 skb = hdev->reassembly[index];
2361
2362 if (!skb) {
2363 switch (type) {
2364 case HCI_ACLDATA_PKT:
2365 len = HCI_MAX_FRAME_SIZE;
2366 hlen = HCI_ACL_HDR_SIZE;
2367 break;
2368 case HCI_EVENT_PKT:
2369 len = HCI_MAX_EVENT_SIZE;
2370 hlen = HCI_EVENT_HDR_SIZE;
2371 break;
2372 case HCI_SCODATA_PKT:
2373 len = HCI_MAX_SCO_SIZE;
2374 hlen = HCI_SCO_HDR_SIZE;
2375 break;
2376 }
2377
2378 skb = bt_skb_alloc(len, GFP_ATOMIC);
2379 if (!skb)
2380 return -ENOMEM;
2381
2382 scb = (void *) skb->cb;
2383 scb->expect = hlen;
2384 scb->pkt_type = type;
2385
2386 skb->dev = (void *) hdev;
2387 hdev->reassembly[index] = skb;
2388 }
2389
2390 while (count) {
2391 scb = (void *) skb->cb;
2392 len = min_t(uint, scb->expect, count);
2393
2394 memcpy(skb_put(skb, len), data, len);
2395
2396 count -= len;
2397 data += len;
2398 scb->expect -= len;
2399 remain = count;
2400
2401 switch (type) {
2402 case HCI_EVENT_PKT:
2403 if (skb->len == HCI_EVENT_HDR_SIZE) {
2404 struct hci_event_hdr *h = hci_event_hdr(skb);
2405 scb->expect = h->plen;
2406
2407 if (skb_tailroom(skb) < scb->expect) {
2408 kfree_skb(skb);
2409 hdev->reassembly[index] = NULL;
2410 return -ENOMEM;
2411 }
2412 }
2413 break;
2414
2415 case HCI_ACLDATA_PKT:
2416 if (skb->len == HCI_ACL_HDR_SIZE) {
2417 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2418 scb->expect = __le16_to_cpu(h->dlen);
2419
2420 if (skb_tailroom(skb) < scb->expect) {
2421 kfree_skb(skb);
2422 hdev->reassembly[index] = NULL;
2423 return -ENOMEM;
2424 }
2425 }
2426 break;
2427
2428 case HCI_SCODATA_PKT:
2429 if (skb->len == HCI_SCO_HDR_SIZE) {
2430 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2431 scb->expect = h->dlen;
2432
2433 if (skb_tailroom(skb) < scb->expect) {
2434 kfree_skb(skb);
2435 hdev->reassembly[index] = NULL;
2436 return -ENOMEM;
2437 }
2438 }
2439 break;
2440 }
2441
2442 if (scb->expect == 0) {
2443 /* Complete frame */
2444
2445 bt_cb(skb)->pkt_type = type;
2446 hci_recv_frame(skb);
2447
2448 hdev->reassembly[index] = NULL;
2449 return remain;
2450 }
2451 }
2452
2453 return remain;
2454 }
2455
2456 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2457 {
2458 int rem = 0;
2459
2460 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2461 return -EILSEQ;
2462
2463 while (count) {
2464 rem = hci_reassembly(hdev, type, data, count, type - 1);
2465 if (rem < 0)
2466 return rem;
2467
2468 data += (count - rem);
2469 count = rem;
2470 }
2471
2472 return rem;
2473 }
2474 EXPORT_SYMBOL(hci_recv_fragment);
2475
2476 #define STREAM_REASSEMBLY 0
2477
2478 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2479 {
2480 int type;
2481 int rem = 0;
2482
2483 while (count) {
2484 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2485
2486 if (!skb) {
2487 struct { char type; } *pkt;
2488
2489 /* Start of the frame */
2490 pkt = data;
2491 type = pkt->type;
2492
2493 data++;
2494 count--;
2495 } else
2496 type = bt_cb(skb)->pkt_type;
2497
2498 rem = hci_reassembly(hdev, type, data, count,
2499 STREAM_REASSEMBLY);
2500 if (rem < 0)
2501 return rem;
2502
2503 data += (count - rem);
2504 count = rem;
2505 }
2506
2507 return rem;
2508 }
2509 EXPORT_SYMBOL(hci_recv_stream_fragment);
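
/* Usage sketch (not part of the original file): a UART-style driver that
 * receives a raw byte stream (packet type indicator followed by the packet)
 * can push each chunk to the core and let the stream reassembly above split
 * it into frames. foo_uart is a hypothetical driver structure holding the
 * registered hci_dev.
 *
 *	static void foo_uart_rx(struct foo_uart *fu, u8 *buf, int count)
 *	{
 *		int err = hci_recv_stream_fragment(fu->hdev, buf, count);
 *
 *		if (err < 0)
 *			BT_ERR("%s frame reassembly failed (%d)",
 *			       fu->hdev->name, err);
 *	}
 */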
2510
2511 /* ---- Interface to upper protocols ---- */
2512
2513 int hci_register_cb(struct hci_cb *cb)
2514 {
2515 BT_DBG("%p name %s", cb, cb->name);
2516
2517 write_lock(&hci_cb_list_lock);
2518 list_add(&cb->list, &hci_cb_list);
2519 write_unlock(&hci_cb_list_lock);
2520
2521 return 0;
2522 }
2523 EXPORT_SYMBOL(hci_register_cb);
2524
2525 int hci_unregister_cb(struct hci_cb *cb)
2526 {
2527 BT_DBG("%p name %s", cb, cb->name);
2528
2529 write_lock(&hci_cb_list_lock);
2530 list_del(&cb->list);
2531 write_unlock(&hci_cb_list_lock);
2532
2533 return 0;
2534 }
2535 EXPORT_SYMBOL(hci_unregister_cb);
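
/* Usage sketch (not part of the original file): an upper protocol registers
 * its callback block once at module init and removes it on exit. Only .name
 * is shown here; the confirmation hooks in struct hci_cb are filled in as the
 * protocol needs them. "foo" is a hypothetical protocol name.
 *
 *	static struct hci_cb foo_cb = {
 *		.name = "foo",
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */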
2536
2537 static int hci_send_frame(struct sk_buff *skb)
2538 {
2539 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2540
2541 if (!hdev) {
2542 kfree_skb(skb);
2543 return -ENODEV;
2544 }
2545
2546 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2547
2548 /* Time stamp */
2549 __net_timestamp(skb);
2550
2551 /* Send copy to monitor */
2552 hci_send_to_monitor(hdev, skb);
2553
2554 if (atomic_read(&hdev->promisc)) {
2555 /* Send copy to the sockets */
2556 hci_send_to_sock(hdev, skb);
2557 }
2558
2559 /* Get rid of skb owner, prior to sending to the driver. */
2560 skb_orphan(skb);
2561
2562 return hdev->send(skb);
2563 }
2564
2565 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2566 {
2567 skb_queue_head_init(&req->cmd_q);
2568 req->hdev = hdev;
2569 req->err = 0;
2570 }
2571
2572 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2573 {
2574 struct hci_dev *hdev = req->hdev;
2575 struct sk_buff *skb;
2576 unsigned long flags;
2577
2578 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2579
2580 	/* If an error occurred during request building, remove all HCI
2581 * commands queued on the HCI request queue.
2582 */
2583 if (req->err) {
2584 skb_queue_purge(&req->cmd_q);
2585 return req->err;
2586 }
2587
2588 /* Do not allow empty requests */
2589 if (skb_queue_empty(&req->cmd_q))
2590 return -ENODATA;
2591
2592 skb = skb_peek_tail(&req->cmd_q);
2593 bt_cb(skb)->req.complete = complete;
2594
2595 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2596 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2597 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2598
2599 queue_work(hdev->workqueue, &hdev->cmd_work);
2600
2601 return 0;
2602 }
2603
2604 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2605 u32 plen, void *param)
2606 {
2607 int len = HCI_COMMAND_HDR_SIZE + plen;
2608 struct hci_command_hdr *hdr;
2609 struct sk_buff *skb;
2610
2611 skb = bt_skb_alloc(len, GFP_ATOMIC);
2612 if (!skb)
2613 return NULL;
2614
2615 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2616 hdr->opcode = cpu_to_le16(opcode);
2617 hdr->plen = plen;
2618
2619 if (plen)
2620 memcpy(skb_put(skb, plen), param, plen);
2621
2622 BT_DBG("skb len %d", skb->len);
2623
2624 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2625 skb->dev = (void *) hdev;
2626
2627 return skb;
2628 }
2629
2630 /* Send HCI command */
2631 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2632 {
2633 struct sk_buff *skb;
2634
2635 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2636
2637 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2638 if (!skb) {
2639 BT_ERR("%s no memory for command", hdev->name);
2640 return -ENOMEM;
2641 }
2642
2643 	/* Stand-alone HCI commands must be flagged as
2644 * single-command requests.
2645 */
2646 bt_cb(skb)->req.start = true;
2647
2648 skb_queue_tail(&hdev->cmd_q, skb);
2649 queue_work(hdev->workqueue, &hdev->cmd_work);
2650
2651 return 0;
2652 }
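
/* Usage sketch (not part of the original file): queueing a stand-alone
 * command. A parameterless command such as HCI_Reset passes plen 0 and a
 * NULL parameter; commands with parameters pass a filled-in cp structure
 * and its size, as hci_do_inquiry() below does.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err < 0)
 *		BT_ERR("%s failed to queue HCI_Reset (%d)", hdev->name, err);
 */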
2653
2654 /* Queue a command to an asynchronous HCI request */
2655 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2656 u8 event)
2657 {
2658 struct hci_dev *hdev = req->hdev;
2659 struct sk_buff *skb;
2660
2661 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2662
2663 	/* If an error occurred during request building, there is no point in
2664 * queueing the HCI command. We can simply return.
2665 */
2666 if (req->err)
2667 return;
2668
2669 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2670 if (!skb) {
2671 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2672 hdev->name, opcode);
2673 req->err = -ENOMEM;
2674 return;
2675 }
2676
2677 if (skb_queue_empty(&req->cmd_q))
2678 bt_cb(skb)->req.start = true;
2679
2680 bt_cb(skb)->req.event = event;
2681
2682 skb_queue_tail(&req->cmd_q, skb);
2683 }
2684
2685 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2686 {
2687 hci_req_add_ev(req, opcode, plen, param, 0);
2688 }
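
/* Usage sketch (not part of the original file): a caller builds a request,
 * queues one or more commands on it and then runs it with a completion
 * callback that receives the resulting status. foo_req_complete is a
 * hypothetical callback.
 *
 *	static void foo_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, foo_req_complete);
 */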
2689
2690 /* Get data from the previously sent command */
2691 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2692 {
2693 struct hci_command_hdr *hdr;
2694
2695 if (!hdev->sent_cmd)
2696 return NULL;
2697
2698 hdr = (void *) hdev->sent_cmd->data;
2699
2700 if (hdr->opcode != cpu_to_le16(opcode))
2701 return NULL;
2702
2703 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2704
2705 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2706 }
2707
2708 /* Send ACL data */
2709 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2710 {
2711 struct hci_acl_hdr *hdr;
2712 int len = skb->len;
2713
2714 skb_push(skb, HCI_ACL_HDR_SIZE);
2715 skb_reset_transport_header(skb);
2716 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2717 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2718 hdr->dlen = cpu_to_le16(len);
2719 }
2720
2721 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2722 struct sk_buff *skb, __u16 flags)
2723 {
2724 struct hci_conn *conn = chan->conn;
2725 struct hci_dev *hdev = conn->hdev;
2726 struct sk_buff *list;
2727
2728 skb->len = skb_headlen(skb);
2729 skb->data_len = 0;
2730
2731 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2732
2733 switch (hdev->dev_type) {
2734 case HCI_BREDR:
2735 hci_add_acl_hdr(skb, conn->handle, flags);
2736 break;
2737 case HCI_AMP:
2738 hci_add_acl_hdr(skb, chan->handle, flags);
2739 break;
2740 default:
2741 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2742 return;
2743 }
2744
2745 list = skb_shinfo(skb)->frag_list;
2746 if (!list) {
2747 		/* Non-fragmented */
2748 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2749
2750 skb_queue_tail(queue, skb);
2751 } else {
2752 /* Fragmented */
2753 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2754
2755 skb_shinfo(skb)->frag_list = NULL;
2756
2757 /* Queue all fragments atomically */
2758 spin_lock(&queue->lock);
2759
2760 __skb_queue_tail(queue, skb);
2761
2762 flags &= ~ACL_START;
2763 flags |= ACL_CONT;
2764 do {
2765 skb = list; list = list->next;
2766
2767 skb->dev = (void *) hdev;
2768 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2769 hci_add_acl_hdr(skb, conn->handle, flags);
2770
2771 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2772
2773 __skb_queue_tail(queue, skb);
2774 } while (list);
2775
2776 spin_unlock(&queue->lock);
2777 }
2778 }
2779
2780 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2781 {
2782 struct hci_dev *hdev = chan->conn->hdev;
2783
2784 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2785
2786 skb->dev = (void *) hdev;
2787
2788 hci_queue_acl(chan, &chan->data_q, skb, flags);
2789
2790 queue_work(hdev->workqueue, &hdev->tx_work);
2791 }
2792
2793 /* Send SCO data */
2794 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2795 {
2796 struct hci_dev *hdev = conn->hdev;
2797 struct hci_sco_hdr hdr;
2798
2799 BT_DBG("%s len %d", hdev->name, skb->len);
2800
2801 hdr.handle = cpu_to_le16(conn->handle);
2802 hdr.dlen = skb->len;
2803
2804 skb_push(skb, HCI_SCO_HDR_SIZE);
2805 skb_reset_transport_header(skb);
2806 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2807
2808 skb->dev = (void *) hdev;
2809 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2810
2811 skb_queue_tail(&conn->data_q, skb);
2812 queue_work(hdev->workqueue, &hdev->tx_work);
2813 }
2814
2815 /* ---- HCI TX task (outgoing data) ---- */
2816
2817 /* HCI Connection scheduler */
2818 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2819 int *quote)
2820 {
2821 struct hci_conn_hash *h = &hdev->conn_hash;
2822 struct hci_conn *conn = NULL, *c;
2823 unsigned int num = 0, min = ~0;
2824
2825 	/* We don't have to lock the device here. Connections are always
2826 	 * added and removed while the TX work is disabled. */
2827
2828 rcu_read_lock();
2829
2830 list_for_each_entry_rcu(c, &h->list, list) {
2831 if (c->type != type || skb_queue_empty(&c->data_q))
2832 continue;
2833
2834 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2835 continue;
2836
2837 num++;
2838
2839 if (c->sent < min) {
2840 min = c->sent;
2841 conn = c;
2842 }
2843
2844 if (hci_conn_num(hdev, type) == num)
2845 break;
2846 }
2847
2848 rcu_read_unlock();
2849
2850 if (conn) {
2851 int cnt, q;
2852
2853 switch (conn->type) {
2854 case ACL_LINK:
2855 cnt = hdev->acl_cnt;
2856 break;
2857 case SCO_LINK:
2858 case ESCO_LINK:
2859 cnt = hdev->sco_cnt;
2860 break;
2861 case LE_LINK:
2862 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2863 break;
2864 default:
2865 cnt = 0;
2866 BT_ERR("Unknown link type");
2867 }
2868
2869 q = cnt / num;
2870 *quote = q ? q : 1;
2871 } else
2872 *quote = 0;
2873
2874 BT_DBG("conn %p quote %d", conn, *quote);
2875 return conn;
2876 }
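
/* Worked example (not part of the original file): with hdev->acl_cnt = 8
 * free ACL buffers and num = 3 ACL connections that have queued data, the
 * quote becomes 8 / 3 = 2, so the connection with the fewest outstanding
 * packets may send up to two packets in this round. When cnt / num would be
 * zero the quote is clamped to 1 so a busy connection still makes progress.
 */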
2877
2878 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2879 {
2880 struct hci_conn_hash *h = &hdev->conn_hash;
2881 struct hci_conn *c;
2882
2883 BT_ERR("%s link tx timeout", hdev->name);
2884
2885 rcu_read_lock();
2886
2887 /* Kill stalled connections */
2888 list_for_each_entry_rcu(c, &h->list, list) {
2889 if (c->type == type && c->sent) {
2890 BT_ERR("%s killing stalled connection %pMR",
2891 hdev->name, &c->dst);
2892 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2893 }
2894 }
2895
2896 rcu_read_unlock();
2897 }
2898
2899 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2900 int *quote)
2901 {
2902 struct hci_conn_hash *h = &hdev->conn_hash;
2903 struct hci_chan *chan = NULL;
2904 unsigned int num = 0, min = ~0, cur_prio = 0;
2905 struct hci_conn *conn;
2906 int cnt, q, conn_num = 0;
2907
2908 BT_DBG("%s", hdev->name);
2909
2910 rcu_read_lock();
2911
2912 list_for_each_entry_rcu(conn, &h->list, list) {
2913 struct hci_chan *tmp;
2914
2915 if (conn->type != type)
2916 continue;
2917
2918 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2919 continue;
2920
2921 conn_num++;
2922
2923 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2924 struct sk_buff *skb;
2925
2926 if (skb_queue_empty(&tmp->data_q))
2927 continue;
2928
2929 skb = skb_peek(&tmp->data_q);
2930 if (skb->priority < cur_prio)
2931 continue;
2932
2933 if (skb->priority > cur_prio) {
2934 num = 0;
2935 min = ~0;
2936 cur_prio = skb->priority;
2937 }
2938
2939 num++;
2940
2941 if (conn->sent < min) {
2942 min = conn->sent;
2943 chan = tmp;
2944 }
2945 }
2946
2947 if (hci_conn_num(hdev, type) == conn_num)
2948 break;
2949 }
2950
2951 rcu_read_unlock();
2952
2953 if (!chan)
2954 return NULL;
2955
2956 switch (chan->conn->type) {
2957 case ACL_LINK:
2958 cnt = hdev->acl_cnt;
2959 break;
2960 case AMP_LINK:
2961 cnt = hdev->block_cnt;
2962 break;
2963 case SCO_LINK:
2964 case ESCO_LINK:
2965 cnt = hdev->sco_cnt;
2966 break;
2967 case LE_LINK:
2968 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2969 break;
2970 default:
2971 cnt = 0;
2972 BT_ERR("Unknown link type");
2973 }
2974
2975 q = cnt / num;
2976 *quote = q ? q : 1;
2977 BT_DBG("chan %p quote %d", chan, *quote);
2978 return chan;
2979 }
2980
2981 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2982 {
2983 struct hci_conn_hash *h = &hdev->conn_hash;
2984 struct hci_conn *conn;
2985 int num = 0;
2986
2987 BT_DBG("%s", hdev->name);
2988
2989 rcu_read_lock();
2990
2991 list_for_each_entry_rcu(conn, &h->list, list) {
2992 struct hci_chan *chan;
2993
2994 if (conn->type != type)
2995 continue;
2996
2997 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2998 continue;
2999
3000 num++;
3001
3002 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3003 struct sk_buff *skb;
3004
3005 if (chan->sent) {
3006 chan->sent = 0;
3007 continue;
3008 }
3009
3010 if (skb_queue_empty(&chan->data_q))
3011 continue;
3012
3013 skb = skb_peek(&chan->data_q);
3014 if (skb->priority >= HCI_PRIO_MAX - 1)
3015 continue;
3016
3017 skb->priority = HCI_PRIO_MAX - 1;
3018
3019 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3020 skb->priority);
3021 }
3022
3023 if (hci_conn_num(hdev, type) == num)
3024 break;
3025 }
3026
3027 rcu_read_unlock();
3028
3029 }
3030
3031 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3032 {
3033 /* Calculate count of blocks used by this packet */
3034 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3035 }
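
/* Worked example (not part of the original file): with hdev->block_len = 64
 * and an ACL frame of 260 bytes (4 byte ACL header plus 256 bytes of
 * payload), the payload occupies DIV_ROUND_UP(260 - 4, 64) = 4 data blocks
 * of the controller's buffer.
 */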
3036
3037 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3038 {
3039 if (!test_bit(HCI_RAW, &hdev->flags)) {
3040 /* ACL tx timeout must be longer than maximum
3041 * link supervision timeout (40.9 seconds) */
3042 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3043 HCI_ACL_TX_TIMEOUT))
3044 hci_link_tx_to(hdev, ACL_LINK);
3045 }
3046 }
3047
3048 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3049 {
3050 unsigned int cnt = hdev->acl_cnt;
3051 struct hci_chan *chan;
3052 struct sk_buff *skb;
3053 int quote;
3054
3055 __check_timeout(hdev, cnt);
3056
3057 while (hdev->acl_cnt &&
3058 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3059 u32 priority = (skb_peek(&chan->data_q))->priority;
3060 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3061 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3062 skb->len, skb->priority);
3063
3064 /* Stop if priority has changed */
3065 if (skb->priority < priority)
3066 break;
3067
3068 skb = skb_dequeue(&chan->data_q);
3069
3070 hci_conn_enter_active_mode(chan->conn,
3071 bt_cb(skb)->force_active);
3072
3073 hci_send_frame(skb);
3074 hdev->acl_last_tx = jiffies;
3075
3076 hdev->acl_cnt--;
3077 chan->sent++;
3078 chan->conn->sent++;
3079 }
3080 }
3081
3082 if (cnt != hdev->acl_cnt)
3083 hci_prio_recalculate(hdev, ACL_LINK);
3084 }
3085
3086 static void hci_sched_acl_blk(struct hci_dev *hdev)
3087 {
3088 unsigned int cnt = hdev->block_cnt;
3089 struct hci_chan *chan;
3090 struct sk_buff *skb;
3091 int quote;
3092 u8 type;
3093
3094 __check_timeout(hdev, cnt);
3095
3096 BT_DBG("%s", hdev->name);
3097
3098 if (hdev->dev_type == HCI_AMP)
3099 type = AMP_LINK;
3100 else
3101 type = ACL_LINK;
3102
3103 while (hdev->block_cnt > 0 &&
3104 (chan = hci_chan_sent(hdev, type, &quote))) {
3105 u32 priority = (skb_peek(&chan->data_q))->priority;
3106 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3107 int blocks;
3108
3109 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3110 skb->len, skb->priority);
3111
3112 /* Stop if priority has changed */
3113 if (skb->priority < priority)
3114 break;
3115
3116 skb = skb_dequeue(&chan->data_q);
3117
3118 blocks = __get_blocks(hdev, skb);
3119 if (blocks > hdev->block_cnt)
3120 return;
3121
3122 hci_conn_enter_active_mode(chan->conn,
3123 bt_cb(skb)->force_active);
3124
3125 hci_send_frame(skb);
3126 hdev->acl_last_tx = jiffies;
3127
3128 hdev->block_cnt -= blocks;
3129 quote -= blocks;
3130
3131 chan->sent += blocks;
3132 chan->conn->sent += blocks;
3133 }
3134 }
3135
3136 if (cnt != hdev->block_cnt)
3137 hci_prio_recalculate(hdev, type);
3138 }
3139
3140 static void hci_sched_acl(struct hci_dev *hdev)
3141 {
3142 BT_DBG("%s", hdev->name);
3143
3144 /* No ACL link over BR/EDR controller */
3145 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3146 return;
3147
3148 /* No AMP link over AMP controller */
3149 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3150 return;
3151
3152 switch (hdev->flow_ctl_mode) {
3153 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3154 hci_sched_acl_pkt(hdev);
3155 break;
3156
3157 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3158 hci_sched_acl_blk(hdev);
3159 break;
3160 }
3161 }
3162
3163 /* Schedule SCO */
3164 static void hci_sched_sco(struct hci_dev *hdev)
3165 {
3166 struct hci_conn *conn;
3167 struct sk_buff *skb;
3168 int quote;
3169
3170 BT_DBG("%s", hdev->name);
3171
3172 if (!hci_conn_num(hdev, SCO_LINK))
3173 return;
3174
3175 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3176 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3177 BT_DBG("skb %p len %d", skb, skb->len);
3178 hci_send_frame(skb);
3179
3180 conn->sent++;
3181 if (conn->sent == ~0)
3182 conn->sent = 0;
3183 }
3184 }
3185 }
3186
3187 static void hci_sched_esco(struct hci_dev *hdev)
3188 {
3189 struct hci_conn *conn;
3190 struct sk_buff *skb;
3191 int quote;
3192
3193 BT_DBG("%s", hdev->name);
3194
3195 if (!hci_conn_num(hdev, ESCO_LINK))
3196 return;
3197
3198 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3199 &quote))) {
3200 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3201 BT_DBG("skb %p len %d", skb, skb->len);
3202 hci_send_frame(skb);
3203
3204 conn->sent++;
3205 if (conn->sent == ~0)
3206 conn->sent = 0;
3207 }
3208 }
3209 }
3210
3211 static void hci_sched_le(struct hci_dev *hdev)
3212 {
3213 struct hci_chan *chan;
3214 struct sk_buff *skb;
3215 int quote, cnt, tmp;
3216
3217 BT_DBG("%s", hdev->name);
3218
3219 if (!hci_conn_num(hdev, LE_LINK))
3220 return;
3221
3222 if (!test_bit(HCI_RAW, &hdev->flags)) {
3223 /* LE tx timeout must be longer than maximum
3224 * link supervision timeout (40.9 seconds) */
3225 if (!hdev->le_cnt && hdev->le_pkts &&
3226 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3227 hci_link_tx_to(hdev, LE_LINK);
3228 }
3229
3230 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3231 tmp = cnt;
3232 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3233 u32 priority = (skb_peek(&chan->data_q))->priority;
3234 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3235 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3236 skb->len, skb->priority);
3237
3238 /* Stop if priority has changed */
3239 if (skb->priority < priority)
3240 break;
3241
3242 skb = skb_dequeue(&chan->data_q);
3243
3244 hci_send_frame(skb);
3245 hdev->le_last_tx = jiffies;
3246
3247 cnt--;
3248 chan->sent++;
3249 chan->conn->sent++;
3250 }
3251 }
3252
3253 if (hdev->le_pkts)
3254 hdev->le_cnt = cnt;
3255 else
3256 hdev->acl_cnt = cnt;
3257
3258 if (cnt != tmp)
3259 hci_prio_recalculate(hdev, LE_LINK);
3260 }
3261
3262 static void hci_tx_work(struct work_struct *work)
3263 {
3264 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3265 struct sk_buff *skb;
3266
3267 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3268 hdev->sco_cnt, hdev->le_cnt);
3269
3270 	/* Schedule queues and send pending data to the HCI driver */
3271
3272 hci_sched_acl(hdev);
3273
3274 hci_sched_sco(hdev);
3275
3276 hci_sched_esco(hdev);
3277
3278 hci_sched_le(hdev);
3279
3280 /* Send next queued raw (unknown type) packet */
3281 while ((skb = skb_dequeue(&hdev->raw_q)))
3282 hci_send_frame(skb);
3283 }
3284
3285 /* ----- HCI RX task (incoming data processing) ----- */
3286
3287 /* ACL data packet */
3288 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3289 {
3290 struct hci_acl_hdr *hdr = (void *) skb->data;
3291 struct hci_conn *conn;
3292 __u16 handle, flags;
3293
3294 skb_pull(skb, HCI_ACL_HDR_SIZE);
3295
3296 handle = __le16_to_cpu(hdr->handle);
3297 flags = hci_flags(handle);
3298 handle = hci_handle(handle);
3299
3300 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3301 handle, flags);
3302
3303 hdev->stat.acl_rx++;
3304
3305 hci_dev_lock(hdev);
3306 conn = hci_conn_hash_lookup_handle(hdev, handle);
3307 hci_dev_unlock(hdev);
3308
3309 if (conn) {
3310 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3311
3312 /* Send to upper protocol */
3313 l2cap_recv_acldata(conn, skb, flags);
3314 return;
3315 } else {
3316 BT_ERR("%s ACL packet for unknown connection handle %d",
3317 hdev->name, handle);
3318 }
3319
3320 kfree_skb(skb);
3321 }
3322
3323 /* SCO data packet */
3324 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3325 {
3326 struct hci_sco_hdr *hdr = (void *) skb->data;
3327 struct hci_conn *conn;
3328 __u16 handle;
3329
3330 skb_pull(skb, HCI_SCO_HDR_SIZE);
3331
3332 handle = __le16_to_cpu(hdr->handle);
3333
3334 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3335
3336 hdev->stat.sco_rx++;
3337
3338 hci_dev_lock(hdev);
3339 conn = hci_conn_hash_lookup_handle(hdev, handle);
3340 hci_dev_unlock(hdev);
3341
3342 if (conn) {
3343 /* Send to upper protocol */
3344 sco_recv_scodata(conn, skb);
3345 return;
3346 } else {
3347 BT_ERR("%s SCO packet for unknown connection handle %d",
3348 hdev->name, handle);
3349 }
3350
3351 kfree_skb(skb);
3352 }
3353
3354 static bool hci_req_is_complete(struct hci_dev *hdev)
3355 {
3356 struct sk_buff *skb;
3357
3358 skb = skb_peek(&hdev->cmd_q);
3359 if (!skb)
3360 return true;
3361
3362 return bt_cb(skb)->req.start;
3363 }
3364
3365 static void hci_resend_last(struct hci_dev *hdev)
3366 {
3367 struct hci_command_hdr *sent;
3368 struct sk_buff *skb;
3369 u16 opcode;
3370
3371 if (!hdev->sent_cmd)
3372 return;
3373
3374 sent = (void *) hdev->sent_cmd->data;
3375 opcode = __le16_to_cpu(sent->opcode);
3376 if (opcode == HCI_OP_RESET)
3377 return;
3378
3379 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3380 if (!skb)
3381 return;
3382
3383 skb_queue_head(&hdev->cmd_q, skb);
3384 queue_work(hdev->workqueue, &hdev->cmd_work);
3385 }
3386
3387 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3388 {
3389 hci_req_complete_t req_complete = NULL;
3390 struct sk_buff *skb;
3391 unsigned long flags;
3392
3393 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3394
3395 /* If the completed command doesn't match the last one that was
3396 * sent we need to do special handling of it.
3397 */
3398 if (!hci_sent_cmd_data(hdev, opcode)) {
3399 /* Some CSR based controllers generate a spontaneous
3400 * reset complete event during init and any pending
3401 * command will never be completed. In such a case we
3402 * need to resend whatever was the last sent
3403 * command.
3404 */
3405 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3406 hci_resend_last(hdev);
3407
3408 return;
3409 }
3410
3411 /* If the command succeeded and there's still more commands in
3412 * this request the request is not yet complete.
3413 */
3414 if (!status && !hci_req_is_complete(hdev))
3415 return;
3416
3417 /* If this was the last command in a request the complete
3418 * callback would be found in hdev->sent_cmd instead of the
3419 * command queue (hdev->cmd_q).
3420 */
3421 if (hdev->sent_cmd) {
3422 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3423 if (req_complete)
3424 goto call_complete;
3425 }
3426
3427 /* Remove all pending commands belonging to this request */
3428 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3429 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3430 if (bt_cb(skb)->req.start) {
3431 __skb_queue_head(&hdev->cmd_q, skb);
3432 break;
3433 }
3434
3435 req_complete = bt_cb(skb)->req.complete;
3436 kfree_skb(skb);
3437 }
3438 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3439
3440 call_complete:
3441 if (req_complete)
3442 req_complete(hdev, status);
3443 }
3444
3445 static void hci_rx_work(struct work_struct *work)
3446 {
3447 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3448 struct sk_buff *skb;
3449
3450 BT_DBG("%s", hdev->name);
3451
3452 while ((skb = skb_dequeue(&hdev->rx_q))) {
3453 /* Send copy to monitor */
3454 hci_send_to_monitor(hdev, skb);
3455
3456 if (atomic_read(&hdev->promisc)) {
3457 /* Send copy to the sockets */
3458 hci_send_to_sock(hdev, skb);
3459 }
3460
3461 if (test_bit(HCI_RAW, &hdev->flags)) {
3462 kfree_skb(skb);
3463 continue;
3464 }
3465
3466 if (test_bit(HCI_INIT, &hdev->flags)) {
3467 			/* Don't process data packets in this state. */
3468 switch (bt_cb(skb)->pkt_type) {
3469 case HCI_ACLDATA_PKT:
3470 case HCI_SCODATA_PKT:
3471 kfree_skb(skb);
3472 continue;
3473 }
3474 }
3475
3476 /* Process frame */
3477 switch (bt_cb(skb)->pkt_type) {
3478 case HCI_EVENT_PKT:
3479 BT_DBG("%s Event packet", hdev->name);
3480 hci_event_packet(hdev, skb);
3481 break;
3482
3483 case HCI_ACLDATA_PKT:
3484 BT_DBG("%s ACL data packet", hdev->name);
3485 hci_acldata_packet(hdev, skb);
3486 break;
3487
3488 case HCI_SCODATA_PKT:
3489 BT_DBG("%s SCO data packet", hdev->name);
3490 hci_scodata_packet(hdev, skb);
3491 break;
3492
3493 default:
3494 kfree_skb(skb);
3495 break;
3496 }
3497 }
3498 }
3499
3500 static void hci_cmd_work(struct work_struct *work)
3501 {
3502 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3503 struct sk_buff *skb;
3504
3505 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3506 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3507
3508 /* Send queued commands */
3509 if (atomic_read(&hdev->cmd_cnt)) {
3510 skb = skb_dequeue(&hdev->cmd_q);
3511 if (!skb)
3512 return;
3513
3514 kfree_skb(hdev->sent_cmd);
3515
3516 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3517 if (hdev->sent_cmd) {
3518 atomic_dec(&hdev->cmd_cnt);
3519 hci_send_frame(skb);
3520 if (test_bit(HCI_RESET, &hdev->flags))
3521 del_timer(&hdev->cmd_timer);
3522 else
3523 mod_timer(&hdev->cmd_timer,
3524 jiffies + HCI_CMD_TIMEOUT);
3525 } else {
3526 skb_queue_head(&hdev->cmd_q, skb);
3527 queue_work(hdev->workqueue, &hdev->cmd_work);
3528 }
3529 }
3530 }
3531
3532 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3533 {
3534 /* General inquiry access code (GIAC) */
3535 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3536 struct hci_cp_inquiry cp;
3537
3538 BT_DBG("%s", hdev->name);
3539
3540 if (test_bit(HCI_INQUIRY, &hdev->flags))
3541 return -EINPROGRESS;
3542
3543 inquiry_cache_flush(hdev);
3544
3545 memset(&cp, 0, sizeof(cp));
3546 memcpy(&cp.lap, lap, sizeof(cp.lap));
3547 cp.length = length;
3548
3549 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3550 }
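
/* Usage sketch (not part of the original file): length is the HCI
 * Inquiry_Length, expressed in units of 1.28 seconds, so a value of 0x08
 * yields an inquiry of roughly 10.24 seconds using the GIAC set above.
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 *	if (err == -EINPROGRESS)
 *		BT_DBG("%s inquiry already in progress", hdev->name);
 */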
3551
3552 int hci_cancel_inquiry(struct hci_dev *hdev)
3553 {
3554 BT_DBG("%s", hdev->name);
3555
3556 if (!test_bit(HCI_INQUIRY, &hdev->flags))
3557 return -EALREADY;
3558
3559 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3560 }
3561
3562 u8 bdaddr_to_le(u8 bdaddr_type)
3563 {
3564 switch (bdaddr_type) {
3565 case BDADDR_LE_PUBLIC:
3566 return ADDR_LE_DEV_PUBLIC;
3567
3568 default:
3569 /* Fallback to LE Random address type */
3570 return ADDR_LE_DEV_RANDOM;
3571 }
3572 }