Bluetooth: Add opcode to error message
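When an HCI command times out, the "command tx timeout" error gives no hint about which command got stuck. Look up the still-pending command in hdev->sent_cmd and include its opcode in the error message (see hci_cmd_timeout() below), so the offending command can be identified from the log.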
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 #define AUTO_OFF_TIMEOUT 2000
37
38 static void hci_rx_work(struct work_struct *work);
39 static void hci_cmd_work(struct work_struct *work);
40 static void hci_tx_work(struct work_struct *work);
41
42 /* HCI device list */
43 LIST_HEAD(hci_dev_list);
44 DEFINE_RWLOCK(hci_dev_list_lock);
45
46 /* HCI callback list */
47 LIST_HEAD(hci_cb_list);
48 DEFINE_RWLOCK(hci_cb_list_lock);
49
50 /* HCI ID Numbering */
51 static DEFINE_IDA(hci_index_ida);
52
53 /* ---- HCI notifications ---- */
54
55 static void hci_notify(struct hci_dev *hdev, int event)
56 {
57 hci_sock_dev_event(hdev, event);
58 }
59
60 /* ---- HCI requests ---- */
61
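/* Called for every completed command. During HCI_INIT, a completion that
 * does not match the last init command is either ignored or, when a
 * controller (e.g. some CSR parts) sent a spontaneous reset complete,
 * triggers a resend of the last sent command so the init sequence can
 * make progress. Otherwise it completes any pending synchronous request.
 */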
62 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
63 {
64 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
65
66 /* If this is the init phase check if the completed command matches
67 * the last init command, and if not just return.
68 */
69 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
70 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
71 u16 opcode = __le16_to_cpu(sent->opcode);
72 struct sk_buff *skb;
73
74 /* Some CSR based controllers generate a spontaneous
75 * reset complete event during init and any pending
76 * command will never be completed. In such a case we
77 * need to resend whatever was the last sent
78 * command.
79 */
80
81 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
82 return;
83
84 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
85 if (skb) {
86 skb_queue_head(&hdev->cmd_q, skb);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88 }
89
90 return;
91 }
92
93 if (hdev->req_status == HCI_REQ_PEND) {
94 hdev->req_result = result;
95 hdev->req_status = HCI_REQ_DONE;
96 wake_up_interruptible(&hdev->req_wait_q);
97 }
98 }
99
100 static void hci_req_cancel(struct hci_dev *hdev, int err)
101 {
102 BT_DBG("%s err 0x%2.2x", hdev->name, err);
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = err;
106 hdev->req_status = HCI_REQ_CANCELED;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109 }
110
111 /* Execute request and wait for completion. */
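/* The req() callback queues one or more HCI commands; the caller then
 * sleeps on req_wait_q until hci_req_complete() or hci_req_cancel()
 * wakes it up, or until the timeout expires. Called with the request
 * lock held (see hci_request() and hci_dev_open()).
 */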
112 static int __hci_request(struct hci_dev *hdev,
113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
115 {
116 DECLARE_WAITQUEUE(wait, current);
117 int err = 0;
118
119 BT_DBG("%s start", hdev->name);
120
121 hdev->req_status = HCI_REQ_PEND;
122
123 add_wait_queue(&hdev->req_wait_q, &wait);
124 set_current_state(TASK_INTERRUPTIBLE);
125
126 req(hdev, opt);
127 schedule_timeout(timeout);
128
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130
131 if (signal_pending(current))
132 return -EINTR;
133
134 switch (hdev->req_status) {
135 case HCI_REQ_DONE:
136 err = -bt_to_errno(hdev->req_result);
137 break;
138
139 case HCI_REQ_CANCELED:
140 err = -hdev->req_result;
141 break;
142
143 default:
144 err = -ETIMEDOUT;
145 break;
146 }
147
148 hdev->req_status = hdev->req_result = 0;
149
150 BT_DBG("%s end: err %d", hdev->name, err);
151
152 return err;
153 }
154
155 static int hci_request(struct hci_dev *hdev,
156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
158 {
159 int ret;
160
161 if (!test_bit(HCI_UP, &hdev->flags))
162 return -ENETDOWN;
163
164 /* Serialize all requests */
165 hci_req_lock(hdev);
166 ret = __hci_request(hdev, req, opt, timeout);
167 hci_req_unlock(hdev);
168
169 return ret;
170 }
171
172 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173 {
174 BT_DBG("%s %ld", hdev->name, opt);
175
176 /* Reset device */
177 set_bit(HCI_RESET, &hdev->flags);
178 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
179 }
180
181 static void bredr_init(struct hci_dev *hdev)
182 {
183 struct hci_cp_delete_stored_link_key cp;
184 __le16 param;
185 __u8 flt_type;
186
187 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
189 /* Mandatory initialization */
190
191 /* Reset */
192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
193 set_bit(HCI_RESET, &hdev->flags);
194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
195 }
196
197 /* Read Local Supported Features */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
199
200 /* Read Local Version */
201 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
202
203 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
204 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
205
206 /* Read BD Address */
207 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209 /* Read Class of Device */
210 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212 /* Read Local Name */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
214
215 /* Read Voice Setting */
216 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
217
218 /* Optional initialization */
219
220 /* Clear Event Filters */
221 flt_type = HCI_FLT_CLEAR_ALL;
222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
223
224 /* Connection accept timeout ~20 secs */
225 param = __constant_cpu_to_le16(0x7d00);
226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
227
228 bacpy(&cp.bdaddr, BDADDR_ANY);
229 cp.delete_all = 1;
230 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
231 }
232
233 static void amp_init(struct hci_dev *hdev)
234 {
235 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
236
237 /* Reset */
238 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
239
240 /* Read Local Version */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
242
243 /* Read Local AMP Info */
244 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
245 }
246
247 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248 {
249 struct sk_buff *skb;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Driver initialization */
254
255 /* Special commands */
256 while ((skb = skb_dequeue(&hdev->driver_init))) {
257 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258 skb->dev = (void *) hdev;
259
260 skb_queue_tail(&hdev->cmd_q, skb);
261 queue_work(hdev->workqueue, &hdev->cmd_work);
262 }
263 skb_queue_purge(&hdev->driver_init);
264
265 switch (hdev->dev_type) {
266 case HCI_BREDR:
267 bredr_init(hdev);
268 break;
269
270 case HCI_AMP:
271 amp_init(hdev);
272 break;
273
274 default:
275 BT_ERR("Unknown device type %d", hdev->dev_type);
276 break;
277 }
278
279 }
280
281 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
282 {
283 BT_DBG("%s", hdev->name);
284
285 /* Read LE buffer size */
286 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287 }
288
289 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290 {
291 __u8 scan = opt;
292
293 BT_DBG("%s %x", hdev->name, scan);
294
295 /* Inquiry and Page scans */
296 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
297 }
298
299 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300 {
301 __u8 auth = opt;
302
303 BT_DBG("%s %x", hdev->name, auth);
304
305 /* Authentication */
306 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
307 }
308
309 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310 {
311 __u8 encrypt = opt;
312
313 BT_DBG("%s %x", hdev->name, encrypt);
314
315 /* Encryption */
316 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
317 }
318
319 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320 {
321 __le16 policy = cpu_to_le16(opt);
322
323 BT_DBG("%s %x", hdev->name, policy);
324
325 /* Default link policy */
326 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327 }
328
329 /* Get HCI device by index.
330 * Device is held on return. */
331 struct hci_dev *hci_dev_get(int index)
332 {
333 struct hci_dev *hdev = NULL, *d;
334
335 BT_DBG("%d", index);
336
337 if (index < 0)
338 return NULL;
339
340 read_lock(&hci_dev_list_lock);
341 list_for_each_entry(d, &hci_dev_list, list) {
342 if (d->id == index) {
343 hdev = hci_dev_hold(d);
344 break;
345 }
346 }
347 read_unlock(&hci_dev_list_lock);
348 return hdev;
349 }
350
351 /* ---- Inquiry support ---- */
352
353 bool hci_discovery_active(struct hci_dev *hdev)
354 {
355 struct discovery_state *discov = &hdev->discovery;
356
357 switch (discov->state) {
358 case DISCOVERY_FINDING:
359 case DISCOVERY_RESOLVING:
360 return true;
361
362 default:
363 return false;
364 }
365 }
366
367 void hci_discovery_set_state(struct hci_dev *hdev, int state)
368 {
369 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
370
371 if (hdev->discovery.state == state)
372 return;
373
374 switch (state) {
375 case DISCOVERY_STOPPED:
376 if (hdev->discovery.state != DISCOVERY_STARTING)
377 mgmt_discovering(hdev, 0);
378 break;
379 case DISCOVERY_STARTING:
380 break;
381 case DISCOVERY_FINDING:
382 mgmt_discovering(hdev, 1);
383 break;
384 case DISCOVERY_RESOLVING:
385 break;
386 case DISCOVERY_STOPPING:
387 break;
388 }
389
390 hdev->discovery.state = state;
391 }
392
393 static void inquiry_cache_flush(struct hci_dev *hdev)
394 {
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *p, *n;
397
398 list_for_each_entry_safe(p, n, &cache->all, all) {
399 list_del(&p->all);
400 kfree(p);
401 }
402
403 INIT_LIST_HEAD(&cache->unknown);
404 INIT_LIST_HEAD(&cache->resolve);
405 }
406
407 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
409 {
410 struct discovery_state *cache = &hdev->discovery;
411 struct inquiry_entry *e;
412
413 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
415 list_for_each_entry(e, &cache->all, all) {
416 if (!bacmp(&e->data.bdaddr, bdaddr))
417 return e;
418 }
419
420 return NULL;
421 }
422
423 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
424 bdaddr_t *bdaddr)
425 {
426 struct discovery_state *cache = &hdev->discovery;
427 struct inquiry_entry *e;
428
429 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431 list_for_each_entry(e, &cache->unknown, list) {
432 if (!bacmp(&e->data.bdaddr, bdaddr))
433 return e;
434 }
435
436 return NULL;
437 }
438
439 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
440 bdaddr_t *bdaddr,
441 int state)
442 {
443 struct discovery_state *cache = &hdev->discovery;
444 struct inquiry_entry *e;
445
446 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448 list_for_each_entry(e, &cache->resolve, list) {
449 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450 return e;
451 if (!bacmp(&e->data.bdaddr, bdaddr))
452 return e;
453 }
454
455 return NULL;
456 }
457
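/* Re-insert an entry into the resolve list so that it stays ordered by
 * signal strength: name resolution is attempted for the strongest
 * (closest) devices first.
 */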
458 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
459 struct inquiry_entry *ie)
460 {
461 struct discovery_state *cache = &hdev->discovery;
462 struct list_head *pos = &cache->resolve;
463 struct inquiry_entry *p;
464
465 list_del(&ie->list);
466
467 list_for_each_entry(p, &cache->resolve, list) {
468 if (p->name_state != NAME_PENDING &&
469 abs(p->data.rssi) >= abs(ie->data.rssi))
470 break;
471 pos = &p->list;
472 }
473
474 list_add(&ie->list, pos);
475 }
476
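/* Add a new inquiry result to the cache or refresh an existing entry.
 * Returns true if the remote name is already known (so no remote name
 * request is needed) and false otherwise.
 */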
477 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
478 bool name_known, bool *ssp)
479 {
480 struct discovery_state *cache = &hdev->discovery;
481 struct inquiry_entry *ie;
482
483 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
484
485 if (ssp)
486 *ssp = data->ssp_mode;
487
488 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
489 if (ie) {
490 if (ie->data.ssp_mode && ssp)
491 *ssp = true;
492
493 if (ie->name_state == NAME_NEEDED &&
494 data->rssi != ie->data.rssi) {
495 ie->data.rssi = data->rssi;
496 hci_inquiry_cache_update_resolve(hdev, ie);
497 }
498
499 goto update;
500 }
501
502 /* Entry not in the cache. Add new one. */
503 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
504 if (!ie)
505 return false;
506
507 list_add(&ie->all, &cache->all);
508
509 if (name_known) {
510 ie->name_state = NAME_KNOWN;
511 } else {
512 ie->name_state = NAME_NOT_KNOWN;
513 list_add(&ie->list, &cache->unknown);
514 }
515
516 update:
517 if (name_known && ie->name_state != NAME_KNOWN &&
518 ie->name_state != NAME_PENDING) {
519 ie->name_state = NAME_KNOWN;
520 list_del(&ie->list);
521 }
522
523 memcpy(&ie->data, data, sizeof(*data));
524 ie->timestamp = jiffies;
525 cache->timestamp = jiffies;
526
527 if (ie->name_state == NAME_NOT_KNOWN)
528 return false;
529
530 return true;
531 }
532
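/* Copy at most @num cached inquiry results into @buf as consecutive
 * struct inquiry_info records and return how many were copied.
 */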
533 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
534 {
535 struct discovery_state *cache = &hdev->discovery;
536 struct inquiry_info *info = (struct inquiry_info *) buf;
537 struct inquiry_entry *e;
538 int copied = 0;
539
540 list_for_each_entry(e, &cache->all, all) {
541 struct inquiry_data *data = &e->data;
542
543 if (copied >= num)
544 break;
545
546 bacpy(&info->bdaddr, &data->bdaddr);
547 info->pscan_rep_mode = data->pscan_rep_mode;
548 info->pscan_period_mode = data->pscan_period_mode;
549 info->pscan_mode = data->pscan_mode;
550 memcpy(info->dev_class, data->dev_class, 3);
551 info->clock_offset = data->clock_offset;
552
553 info++;
554 copied++;
555 }
556
557 BT_DBG("cache %p, copied %d", cache, copied);
558 return copied;
559 }
560
561 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562 {
563 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564 struct hci_cp_inquiry cp;
565
566 BT_DBG("%s", hdev->name);
567
568 if (test_bit(HCI_INQUIRY, &hdev->flags))
569 return;
570
571 /* Start Inquiry */
572 memcpy(&cp.lap, &ir->lap, 3);
573 cp.length = ir->length;
574 cp.num_rsp = ir->num_rsp;
575 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
576 }
577
578 int hci_inquiry(void __user *arg)
579 {
580 __u8 __user *ptr = arg;
581 struct hci_inquiry_req ir;
582 struct hci_dev *hdev;
583 int err = 0, do_inquiry = 0, max_rsp;
584 long timeo;
585 __u8 *buf;
586
587 if (copy_from_user(&ir, ptr, sizeof(ir)))
588 return -EFAULT;
589
590 hdev = hci_dev_get(ir.dev_id);
591 if (!hdev)
592 return -ENODEV;
593
594 hci_dev_lock(hdev);
595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
597 inquiry_cache_flush(hdev);
598 do_inquiry = 1;
599 }
600 hci_dev_unlock(hdev);
601
602 timeo = ir.length * msecs_to_jiffies(2000);
603
604 if (do_inquiry) {
605 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 if (err < 0)
607 goto done;
608 }
609
610 /* For an unlimited number of responses we use a buffer with
611 * 255 entries.
612 */
613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
619 if (!buf) {
620 err = -ENOMEM;
621 goto done;
622 }
623
624 hci_dev_lock(hdev);
625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
626 hci_dev_unlock(hdev);
627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
633 ir.num_rsp))
634 err = -EFAULT;
635 } else
636 err = -EFAULT;
637
638 kfree(buf);
639
640 done:
641 hci_dev_put(hdev);
642 return err;
643 }
644
645 /* ---- HCI ioctl helpers ---- */
646
647 int hci_dev_open(__u16 dev)
648 {
649 struct hci_dev *hdev;
650 int ret = 0;
651
652 hdev = hci_dev_get(dev);
653 if (!hdev)
654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
660 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 ret = -ENODEV;
662 goto done;
663 }
664
665 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 ret = -ERFKILL;
667 goto done;
668 }
669
670 if (test_bit(HCI_UP, &hdev->flags)) {
671 ret = -EALREADY;
672 goto done;
673 }
674
675 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
676 set_bit(HCI_RAW, &hdev->flags);
677
678 /* Treat all non BR/EDR controllers as raw devices if
679 enable_hs is not set */
680 if (hdev->dev_type != HCI_BREDR && !enable_hs)
681 set_bit(HCI_RAW, &hdev->flags);
682
683 if (hdev->open(hdev)) {
684 ret = -EIO;
685 goto done;
686 }
687
688 if (!test_bit(HCI_RAW, &hdev->flags)) {
689 atomic_set(&hdev->cmd_cnt, 1);
690 set_bit(HCI_INIT, &hdev->flags);
691 hdev->init_last_cmd = 0;
692
693 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
694
695 if (lmp_host_le_capable(hdev))
696 ret = __hci_request(hdev, hci_le_init_req, 0,
697 HCI_INIT_TIMEOUT);
698
699 clear_bit(HCI_INIT, &hdev->flags);
700 }
701
702 if (!ret) {
703 hci_dev_hold(hdev);
704 set_bit(HCI_UP, &hdev->flags);
705 hci_notify(hdev, HCI_DEV_UP);
706 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
707 hci_dev_lock(hdev);
708 mgmt_powered(hdev, 1);
709 hci_dev_unlock(hdev);
710 }
711 } else {
712 /* Init failed, cleanup */
713 flush_work(&hdev->tx_work);
714 flush_work(&hdev->cmd_work);
715 flush_work(&hdev->rx_work);
716
717 skb_queue_purge(&hdev->cmd_q);
718 skb_queue_purge(&hdev->rx_q);
719
720 if (hdev->flush)
721 hdev->flush(hdev);
722
723 if (hdev->sent_cmd) {
724 kfree_skb(hdev->sent_cmd);
725 hdev->sent_cmd = NULL;
726 }
727
728 hdev->close(hdev);
729 hdev->flags = 0;
730 }
731
732 done:
733 hci_req_unlock(hdev);
734 hci_dev_put(hdev);
735 return ret;
736 }
737
738 static int hci_dev_do_close(struct hci_dev *hdev)
739 {
740 BT_DBG("%s %p", hdev->name, hdev);
741
742 cancel_work_sync(&hdev->le_scan);
743
744 hci_req_cancel(hdev, ENODEV);
745 hci_req_lock(hdev);
746
747 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
748 del_timer_sync(&hdev->cmd_timer);
749 hci_req_unlock(hdev);
750 return 0;
751 }
752
753 /* Flush RX and TX works */
754 flush_work(&hdev->tx_work);
755 flush_work(&hdev->rx_work);
756
757 if (hdev->discov_timeout > 0) {
758 cancel_delayed_work(&hdev->discov_off);
759 hdev->discov_timeout = 0;
760 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
761 }
762
763 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
764 cancel_delayed_work(&hdev->service_cache);
765
766 cancel_delayed_work_sync(&hdev->le_scan_disable);
767
768 hci_dev_lock(hdev);
769 inquiry_cache_flush(hdev);
770 hci_conn_hash_flush(hdev);
771 hci_dev_unlock(hdev);
772
773 hci_notify(hdev, HCI_DEV_DOWN);
774
775 if (hdev->flush)
776 hdev->flush(hdev);
777
778 /* Reset device */
779 skb_queue_purge(&hdev->cmd_q);
780 atomic_set(&hdev->cmd_cnt, 1);
781 if (!test_bit(HCI_RAW, &hdev->flags) &&
782 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
783 set_bit(HCI_INIT, &hdev->flags);
784 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
785 clear_bit(HCI_INIT, &hdev->flags);
786 }
787
788 /* flush cmd work */
789 flush_work(&hdev->cmd_work);
790
791 /* Drop queues */
792 skb_queue_purge(&hdev->rx_q);
793 skb_queue_purge(&hdev->cmd_q);
794 skb_queue_purge(&hdev->raw_q);
795
796 /* Drop last sent command */
797 if (hdev->sent_cmd) {
798 del_timer_sync(&hdev->cmd_timer);
799 kfree_skb(hdev->sent_cmd);
800 hdev->sent_cmd = NULL;
801 }
802
803 /* After this point our queues are empty
804 * and no tasks are scheduled. */
805 hdev->close(hdev);
806
807 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
808 hci_dev_lock(hdev);
809 mgmt_powered(hdev, 0);
810 hci_dev_unlock(hdev);
811 }
812
813 /* Clear flags */
814 hdev->flags = 0;
815
816 memset(hdev->eir, 0, sizeof(hdev->eir));
817 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
818
819 hci_req_unlock(hdev);
820
821 hci_dev_put(hdev);
822 return 0;
823 }
824
825 int hci_dev_close(__u16 dev)
826 {
827 struct hci_dev *hdev;
828 int err;
829
830 hdev = hci_dev_get(dev);
831 if (!hdev)
832 return -ENODEV;
833
834 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
835 cancel_delayed_work(&hdev->power_off);
836
837 err = hci_dev_do_close(hdev);
838
839 hci_dev_put(hdev);
840 return err;
841 }
842
843 int hci_dev_reset(__u16 dev)
844 {
845 struct hci_dev *hdev;
846 int ret = 0;
847
848 hdev = hci_dev_get(dev);
849 if (!hdev)
850 return -ENODEV;
851
852 hci_req_lock(hdev);
853
854 if (!test_bit(HCI_UP, &hdev->flags))
855 goto done;
856
857 /* Drop queues */
858 skb_queue_purge(&hdev->rx_q);
859 skb_queue_purge(&hdev->cmd_q);
860
861 hci_dev_lock(hdev);
862 inquiry_cache_flush(hdev);
863 hci_conn_hash_flush(hdev);
864 hci_dev_unlock(hdev);
865
866 if (hdev->flush)
867 hdev->flush(hdev);
868
869 atomic_set(&hdev->cmd_cnt, 1);
870 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
871
872 if (!test_bit(HCI_RAW, &hdev->flags))
873 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
874
875 done:
876 hci_req_unlock(hdev);
877 hci_dev_put(hdev);
878 return ret;
879 }
880
881 int hci_dev_reset_stat(__u16 dev)
882 {
883 struct hci_dev *hdev;
884 int ret = 0;
885
886 hdev = hci_dev_get(dev);
887 if (!hdev)
888 return -ENODEV;
889
890 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
891
892 hci_dev_put(hdev);
893
894 return ret;
895 }
896
897 int hci_dev_cmd(unsigned int cmd, void __user *arg)
898 {
899 struct hci_dev *hdev;
900 struct hci_dev_req dr;
901 int err = 0;
902
903 if (copy_from_user(&dr, arg, sizeof(dr)))
904 return -EFAULT;
905
906 hdev = hci_dev_get(dr.dev_id);
907 if (!hdev)
908 return -ENODEV;
909
910 switch (cmd) {
911 case HCISETAUTH:
912 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
913 HCI_INIT_TIMEOUT);
914 break;
915
916 case HCISETENCRYPT:
917 if (!lmp_encrypt_capable(hdev)) {
918 err = -EOPNOTSUPP;
919 break;
920 }
921
922 if (!test_bit(HCI_AUTH, &hdev->flags)) {
923 /* Auth must be enabled first */
924 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
925 HCI_INIT_TIMEOUT);
926 if (err)
927 break;
928 }
929
930 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
931 HCI_INIT_TIMEOUT);
932 break;
933
934 case HCISETSCAN:
935 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
936 HCI_INIT_TIMEOUT);
937 break;
938
939 case HCISETLINKPOL:
940 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
941 HCI_INIT_TIMEOUT);
942 break;
943
944 case HCISETLINKMODE:
945 hdev->link_mode = ((__u16) dr.dev_opt) &
946 (HCI_LM_MASTER | HCI_LM_ACCEPT);
947 break;
948
949 case HCISETPTYPE:
950 hdev->pkt_type = (__u16) dr.dev_opt;
951 break;
952
953 case HCISETACLMTU:
954 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
955 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
956 break;
957
958 case HCISETSCOMTU:
959 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
960 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
961 break;
962
963 default:
964 err = -EINVAL;
965 break;
966 }
967
968 hci_dev_put(hdev);
969 return err;
970 }
971
972 int hci_get_dev_list(void __user *arg)
973 {
974 struct hci_dev *hdev;
975 struct hci_dev_list_req *dl;
976 struct hci_dev_req *dr;
977 int n = 0, size, err;
978 __u16 dev_num;
979
980 if (get_user(dev_num, (__u16 __user *) arg))
981 return -EFAULT;
982
983 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
984 return -EINVAL;
985
986 size = sizeof(*dl) + dev_num * sizeof(*dr);
987
988 dl = kzalloc(size, GFP_KERNEL);
989 if (!dl)
990 return -ENOMEM;
991
992 dr = dl->dev_req;
993
994 read_lock(&hci_dev_list_lock);
995 list_for_each_entry(hdev, &hci_dev_list, list) {
996 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
997 cancel_delayed_work(&hdev->power_off);
998
999 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1000 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1001
1002 (dr + n)->dev_id = hdev->id;
1003 (dr + n)->dev_opt = hdev->flags;
1004
1005 if (++n >= dev_num)
1006 break;
1007 }
1008 read_unlock(&hci_dev_list_lock);
1009
1010 dl->dev_num = n;
1011 size = sizeof(*dl) + n * sizeof(*dr);
1012
1013 err = copy_to_user(arg, dl, size);
1014 kfree(dl);
1015
1016 return err ? -EFAULT : 0;
1017 }
1018
1019 int hci_get_dev_info(void __user *arg)
1020 {
1021 struct hci_dev *hdev;
1022 struct hci_dev_info di;
1023 int err = 0;
1024
1025 if (copy_from_user(&di, arg, sizeof(di)))
1026 return -EFAULT;
1027
1028 hdev = hci_dev_get(di.dev_id);
1029 if (!hdev)
1030 return -ENODEV;
1031
1032 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1033 cancel_delayed_work_sync(&hdev->power_off);
1034
1035 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1036 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1037
1038 strcpy(di.name, hdev->name);
1039 di.bdaddr = hdev->bdaddr;
1040 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1041 di.flags = hdev->flags;
1042 di.pkt_type = hdev->pkt_type;
1043 di.acl_mtu = hdev->acl_mtu;
1044 di.acl_pkts = hdev->acl_pkts;
1045 di.sco_mtu = hdev->sco_mtu;
1046 di.sco_pkts = hdev->sco_pkts;
1047 di.link_policy = hdev->link_policy;
1048 di.link_mode = hdev->link_mode;
1049
1050 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1051 memcpy(&di.features, &hdev->features, sizeof(di.features));
1052
1053 if (copy_to_user(arg, &di, sizeof(di)))
1054 err = -EFAULT;
1055
1056 hci_dev_put(hdev);
1057
1058 return err;
1059 }
1060
1061 /* ---- Interface to HCI drivers ---- */
1062
1063 static int hci_rfkill_set_block(void *data, bool blocked)
1064 {
1065 struct hci_dev *hdev = data;
1066
1067 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1068
1069 if (!blocked)
1070 return 0;
1071
1072 hci_dev_do_close(hdev);
1073
1074 return 0;
1075 }
1076
1077 static const struct rfkill_ops hci_rfkill_ops = {
1078 .set_block = hci_rfkill_set_block,
1079 };
1080
1081 static void hci_power_on(struct work_struct *work)
1082 {
1083 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1084
1085 BT_DBG("%s", hdev->name);
1086
1087 if (hci_dev_open(hdev->id) < 0)
1088 return;
1089
1090 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1091 schedule_delayed_work(&hdev->power_off,
1092 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1093
1094 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1095 mgmt_index_added(hdev);
1096 }
1097
1098 static void hci_power_off(struct work_struct *work)
1099 {
1100 struct hci_dev *hdev = container_of(work, struct hci_dev,
1101 power_off.work);
1102
1103 BT_DBG("%s", hdev->name);
1104
1105 hci_dev_do_close(hdev);
1106 }
1107
1108 static void hci_discov_off(struct work_struct *work)
1109 {
1110 struct hci_dev *hdev;
1111 u8 scan = SCAN_PAGE;
1112
1113 hdev = container_of(work, struct hci_dev, discov_off.work);
1114
1115 BT_DBG("%s", hdev->name);
1116
1117 hci_dev_lock(hdev);
1118
1119 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1120
1121 hdev->discov_timeout = 0;
1122
1123 hci_dev_unlock(hdev);
1124 }
1125
1126 int hci_uuids_clear(struct hci_dev *hdev)
1127 {
1128 struct list_head *p, *n;
1129
1130 list_for_each_safe(p, n, &hdev->uuids) {
1131 struct bt_uuid *uuid;
1132
1133 uuid = list_entry(p, struct bt_uuid, list);
1134
1135 list_del(p);
1136 kfree(uuid);
1137 }
1138
1139 return 0;
1140 }
1141
1142 int hci_link_keys_clear(struct hci_dev *hdev)
1143 {
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->link_keys) {
1147 struct link_key *key;
1148
1149 key = list_entry(p, struct link_key, list);
1150
1151 list_del(p);
1152 kfree(key);
1153 }
1154
1155 return 0;
1156 }
1157
1158 int hci_smp_ltks_clear(struct hci_dev *hdev)
1159 {
1160 struct smp_ltk *k, *tmp;
1161
1162 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1163 list_del(&k->list);
1164 kfree(k);
1165 }
1166
1167 return 0;
1168 }
1169
1170 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1171 {
1172 struct link_key *k;
1173
1174 list_for_each_entry(k, &hdev->link_keys, list)
1175 if (bacmp(bdaddr, &k->bdaddr) == 0)
1176 return k;
1177
1178 return NULL;
1179 }
1180
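/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and on the bonding requirements of the local and
 * remote side (see the individual rules below).
 */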
1181 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1182 u8 key_type, u8 old_key_type)
1183 {
1184 /* Legacy key */
1185 if (key_type < 0x03)
1186 return true;
1187
1188 /* Debug keys are insecure so don't store them persistently */
1189 if (key_type == HCI_LK_DEBUG_COMBINATION)
1190 return false;
1191
1192 /* Changed combination key and there's no previous one */
1193 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1194 return false;
1195
1196 /* Security mode 3 case */
1197 if (!conn)
1198 return true;
1199
1200 /* Neither local nor remote side had no-bonding as requirement */
1201 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1202 return true;
1203
1204 /* Local side had dedicated bonding as requirement */
1205 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1206 return true;
1207
1208 /* Remote side had dedicated bonding as requirement */
1209 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1210 return true;
1211
1212 /* If none of the above criteria match, then don't store the key
1213 * persistently */
1214 return false;
1215 }
1216
1217 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1218 {
1219 struct smp_ltk *k;
1220
1221 list_for_each_entry(k, &hdev->long_term_keys, list) {
1222 if (k->ediv != ediv ||
1223 memcmp(rand, k->rand, sizeof(k->rand)))
1224 continue;
1225
1226 return k;
1227 }
1228
1229 return NULL;
1230 }
1231
1232 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1233 u8 addr_type)
1234 {
1235 struct smp_ltk *k;
1236
1237 list_for_each_entry(k, &hdev->long_term_keys, list)
1238 if (addr_type == k->bdaddr_type &&
1239 bacmp(bdaddr, &k->bdaddr) == 0)
1240 return k;
1241
1242 return NULL;
1243 }
1244
1245 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1246 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1247 {
1248 struct link_key *key, *old_key;
1249 u8 old_key_type;
1250 bool persistent;
1251
1252 old_key = hci_find_link_key(hdev, bdaddr);
1253 if (old_key) {
1254 old_key_type = old_key->type;
1255 key = old_key;
1256 } else {
1257 old_key_type = conn ? conn->key_type : 0xff;
1258 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1259 if (!key)
1260 return -ENOMEM;
1261 list_add(&key->list, &hdev->link_keys);
1262 }
1263
1264 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1265
1266 /* Some buggy controller combinations generate a changed
1267 * combination key for legacy pairing even when there's no
1268 * previous key */
1269 if (type == HCI_LK_CHANGED_COMBINATION &&
1270 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1271 type = HCI_LK_COMBINATION;
1272 if (conn)
1273 conn->key_type = type;
1274 }
1275
1276 bacpy(&key->bdaddr, bdaddr);
1277 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1278 key->pin_len = pin_len;
1279
1280 if (type == HCI_LK_CHANGED_COMBINATION)
1281 key->type = old_key_type;
1282 else
1283 key->type = type;
1284
1285 if (!new_key)
1286 return 0;
1287
1288 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1289
1290 mgmt_new_link_key(hdev, key, persistent);
1291
1292 if (conn)
1293 conn->flush_key = !persistent;
1294
1295 return 0;
1296 }
1297
1298 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1299 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1300 ediv, u8 rand[8])
1301 {
1302 struct smp_ltk *key, *old_key;
1303
1304 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1305 return 0;
1306
1307 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1308 if (old_key)
1309 key = old_key;
1310 else {
1311 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1312 if (!key)
1313 return -ENOMEM;
1314 list_add(&key->list, &hdev->long_term_keys);
1315 }
1316
1317 bacpy(&key->bdaddr, bdaddr);
1318 key->bdaddr_type = addr_type;
1319 memcpy(key->val, tk, sizeof(key->val));
1320 key->authenticated = authenticated;
1321 key->ediv = ediv;
1322 key->enc_size = enc_size;
1323 key->type = type;
1324 memcpy(key->rand, rand, sizeof(key->rand));
1325
1326 if (!new_key)
1327 return 0;
1328
1329 if (type & HCI_SMP_LTK)
1330 mgmt_new_ltk(hdev, key, 1);
1331
1332 return 0;
1333 }
1334
1335 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336 {
1337 struct link_key *key;
1338
1339 key = hci_find_link_key(hdev, bdaddr);
1340 if (!key)
1341 return -ENOENT;
1342
1343 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1344
1345 list_del(&key->list);
1346 kfree(key);
1347
1348 return 0;
1349 }
1350
1351 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352 {
1353 struct smp_ltk *k, *tmp;
1354
1355 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1356 if (bacmp(bdaddr, &k->bdaddr))
1357 continue;
1358
1359 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1360
1361 list_del(&k->list);
1362 kfree(k);
1363 }
1364
1365 return 0;
1366 }
1367
1368 /* HCI command timer function */
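/* Runs when the controller has not answered the last command in time.
 * The opcode of the command that is still outstanding (hdev->sent_cmd)
 * is included in the error message, and cmd_cnt is restored so the
 * command queue does not stall.
 */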
1369 static void hci_cmd_timeout(unsigned long arg)
1370 {
1371 struct hci_dev *hdev = (void *) arg;
1372
1373 if (hdev->sent_cmd) {
1374 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1375 u16 opcode = __le16_to_cpu(sent->opcode);
1376
1377 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1378 } else {
1379 BT_ERR("%s command tx timeout", hdev->name);
1380 }
1381
1382 atomic_set(&hdev->cmd_cnt, 1);
1383 queue_work(hdev->workqueue, &hdev->cmd_work);
1384 }
1385
1386 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1387 bdaddr_t *bdaddr)
1388 {
1389 struct oob_data *data;
1390
1391 list_for_each_entry(data, &hdev->remote_oob_data, list)
1392 if (bacmp(bdaddr, &data->bdaddr) == 0)
1393 return data;
1394
1395 return NULL;
1396 }
1397
1398 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399 {
1400 struct oob_data *data;
1401
1402 data = hci_find_remote_oob_data(hdev, bdaddr);
1403 if (!data)
1404 return -ENOENT;
1405
1406 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1407
1408 list_del(&data->list);
1409 kfree(data);
1410
1411 return 0;
1412 }
1413
1414 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1415 {
1416 struct oob_data *data, *n;
1417
1418 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1419 list_del(&data->list);
1420 kfree(data);
1421 }
1422
1423 return 0;
1424 }
1425
1426 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1427 u8 *randomizer)
1428 {
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432
1433 if (!data) {
1434 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1435 if (!data)
1436 return -ENOMEM;
1437
1438 bacpy(&data->bdaddr, bdaddr);
1439 list_add(&data->list, &hdev->remote_oob_data);
1440 }
1441
1442 memcpy(data->hash, hash, sizeof(data->hash));
1443 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1444
1445 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1446
1447 return 0;
1448 }
1449
1450 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1451 {
1452 struct bdaddr_list *b;
1453
1454 list_for_each_entry(b, &hdev->blacklist, list)
1455 if (bacmp(bdaddr, &b->bdaddr) == 0)
1456 return b;
1457
1458 return NULL;
1459 }
1460
1461 int hci_blacklist_clear(struct hci_dev *hdev)
1462 {
1463 struct list_head *p, *n;
1464
1465 list_for_each_safe(p, n, &hdev->blacklist) {
1466 struct bdaddr_list *b;
1467
1468 b = list_entry(p, struct bdaddr_list, list);
1469
1470 list_del(p);
1471 kfree(b);
1472 }
1473
1474 return 0;
1475 }
1476
1477 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1478 {
1479 struct bdaddr_list *entry;
1480
1481 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1482 return -EBADF;
1483
1484 if (hci_blacklist_lookup(hdev, bdaddr))
1485 return -EEXIST;
1486
1487 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1488 if (!entry)
1489 return -ENOMEM;
1490
1491 bacpy(&entry->bdaddr, bdaddr);
1492
1493 list_add(&entry->list, &hdev->blacklist);
1494
1495 return mgmt_device_blocked(hdev, bdaddr, type);
1496 }
1497
1498 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1499 {
1500 struct bdaddr_list *entry;
1501
1502 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1503 return hci_blacklist_clear(hdev);
1504
1505 entry = hci_blacklist_lookup(hdev, bdaddr);
1506 if (!entry)
1507 return -ENOENT;
1508
1509 list_del(&entry->list);
1510 kfree(entry);
1511
1512 return mgmt_device_unblocked(hdev, bdaddr, type);
1513 }
1514
1515 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1516 {
1517 struct le_scan_params *param = (struct le_scan_params *) opt;
1518 struct hci_cp_le_set_scan_param cp;
1519
1520 memset(&cp, 0, sizeof(cp));
1521 cp.type = param->type;
1522 cp.interval = cpu_to_le16(param->interval);
1523 cp.window = cpu_to_le16(param->window);
1524
1525 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1526 }
1527
1528 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1529 {
1530 struct hci_cp_le_set_scan_enable cp;
1531
1532 memset(&cp, 0, sizeof(cp));
1533 cp.enable = 1;
1534 cp.filter_dup = 1;
1535
1536 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1537 }
1538
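/* Program the LE scan parameters and enable scanning as synchronous
 * requests, then schedule le_scan_disable to stop the scan again after
 * @timeout milliseconds.
 */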
1539 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1540 u16 window, int timeout)
1541 {
1542 long timeo = msecs_to_jiffies(3000);
1543 struct le_scan_params param;
1544 int err;
1545
1546 BT_DBG("%s", hdev->name);
1547
1548 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1549 return -EINPROGRESS;
1550
1551 param.type = type;
1552 param.interval = interval;
1553 param.window = window;
1554
1555 hci_req_lock(hdev);
1556
1557 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1558 timeo);
1559 if (!err)
1560 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1561
1562 hci_req_unlock(hdev);
1563
1564 if (err < 0)
1565 return err;
1566
1567 schedule_delayed_work(&hdev->le_scan_disable,
1568 msecs_to_jiffies(timeout));
1569
1570 return 0;
1571 }
1572
1573 int hci_cancel_le_scan(struct hci_dev *hdev)
1574 {
1575 BT_DBG("%s", hdev->name);
1576
1577 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1578 return -EALREADY;
1579
1580 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1581 struct hci_cp_le_set_scan_enable cp;
1582
1583 /* Send HCI command to disable LE Scan */
1584 memset(&cp, 0, sizeof(cp));
1585 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1586 }
1587
1588 return 0;
1589 }
1590
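/* Runs when the LE scan timeout expires: the zeroed scan enable command
 * (enable = 0) stops the ongoing scan.
 */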
1591 static void le_scan_disable_work(struct work_struct *work)
1592 {
1593 struct hci_dev *hdev = container_of(work, struct hci_dev,
1594 le_scan_disable.work);
1595 struct hci_cp_le_set_scan_enable cp;
1596
1597 BT_DBG("%s", hdev->name);
1598
1599 memset(&cp, 0, sizeof(cp));
1600
1601 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1602 }
1603
1604 static void le_scan_work(struct work_struct *work)
1605 {
1606 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1607 struct le_scan_params *param = &hdev->le_scan_params;
1608
1609 BT_DBG("%s", hdev->name);
1610
1611 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1612 param->timeout);
1613 }
1614
1615 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1616 int timeout)
1617 {
1618 struct le_scan_params *param = &hdev->le_scan_params;
1619
1620 BT_DBG("%s", hdev->name);
1621
1622 if (work_busy(&hdev->le_scan))
1623 return -EINPROGRESS;
1624
1625 param->type = type;
1626 param->interval = interval;
1627 param->window = window;
1628 param->timeout = timeout;
1629
1630 queue_work(system_long_wq, &hdev->le_scan);
1631
1632 return 0;
1633 }
1634
1635 /* Alloc HCI device */
1636 struct hci_dev *hci_alloc_dev(void)
1637 {
1638 struct hci_dev *hdev;
1639
1640 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1641 if (!hdev)
1642 return NULL;
1643
1644 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1645 hdev->esco_type = (ESCO_HV1);
1646 hdev->link_mode = (HCI_LM_ACCEPT);
1647 hdev->io_capability = 0x03; /* No Input No Output */
1648
1649 hdev->sniff_max_interval = 800;
1650 hdev->sniff_min_interval = 80;
1651
1652 mutex_init(&hdev->lock);
1653 mutex_init(&hdev->req_lock);
1654
1655 INIT_LIST_HEAD(&hdev->mgmt_pending);
1656 INIT_LIST_HEAD(&hdev->blacklist);
1657 INIT_LIST_HEAD(&hdev->uuids);
1658 INIT_LIST_HEAD(&hdev->link_keys);
1659 INIT_LIST_HEAD(&hdev->long_term_keys);
1660 INIT_LIST_HEAD(&hdev->remote_oob_data);
1661
1662 INIT_WORK(&hdev->rx_work, hci_rx_work);
1663 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1664 INIT_WORK(&hdev->tx_work, hci_tx_work);
1665 INIT_WORK(&hdev->power_on, hci_power_on);
1666 INIT_WORK(&hdev->le_scan, le_scan_work);
1667
1668 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1669 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1670 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1671
1672 skb_queue_head_init(&hdev->driver_init);
1673 skb_queue_head_init(&hdev->rx_q);
1674 skb_queue_head_init(&hdev->cmd_q);
1675 skb_queue_head_init(&hdev->raw_q);
1676
1677 init_waitqueue_head(&hdev->req_wait_q);
1678
1679 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1680
1681 hci_init_sysfs(hdev);
1682 discovery_init(hdev);
1683 hci_conn_hash_init(hdev);
1684
1685 return hdev;
1686 }
1687 EXPORT_SYMBOL(hci_alloc_dev);
1688
1689 /* Free HCI device */
1690 void hci_free_dev(struct hci_dev *hdev)
1691 {
1692 skb_queue_purge(&hdev->driver_init);
1693
1694 /* will free via device release */
1695 put_device(&hdev->dev);
1696 }
1697 EXPORT_SYMBOL(hci_free_dev);
1698
1699 /* Register HCI device */
1700 int hci_register_dev(struct hci_dev *hdev)
1701 {
1702 int id, error;
1703
1704 if (!hdev->open || !hdev->close)
1705 return -EINVAL;
1706
1707 /* Do not allow HCI_AMP devices to register at index 0,
1708 * so the index can be used as the AMP controller ID.
1709 */
1710 switch (hdev->dev_type) {
1711 case HCI_BREDR:
1712 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1713 break;
1714 case HCI_AMP:
1715 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1716 break;
1717 default:
1718 return -EINVAL;
1719 }
1720
1721 if (id < 0)
1722 return id;
1723
1724 sprintf(hdev->name, "hci%d", id);
1725 hdev->id = id;
1726
1727 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1728
1729 write_lock(&hci_dev_list_lock);
1730 list_add(&hdev->list, &hci_dev_list);
1731 write_unlock(&hci_dev_list_lock);
1732
1733 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1734 WQ_MEM_RECLAIM, 1);
1735 if (!hdev->workqueue) {
1736 error = -ENOMEM;
1737 goto err;
1738 }
1739
1740 error = hci_add_sysfs(hdev);
1741 if (error < 0)
1742 goto err_wqueue;
1743
1744 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1745 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1746 hdev);
1747 if (hdev->rfkill) {
1748 if (rfkill_register(hdev->rfkill) < 0) {
1749 rfkill_destroy(hdev->rfkill);
1750 hdev->rfkill = NULL;
1751 }
1752 }
1753
1754 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1755 set_bit(HCI_SETUP, &hdev->dev_flags);
1756 schedule_work(&hdev->power_on);
1757
1758 hci_notify(hdev, HCI_DEV_REG);
1759 hci_dev_hold(hdev);
1760
1761 return id;
1762
1763 err_wqueue:
1764 destroy_workqueue(hdev->workqueue);
1765 err:
1766 ida_simple_remove(&hci_index_ida, hdev->id);
1767 write_lock(&hci_dev_list_lock);
1768 list_del(&hdev->list);
1769 write_unlock(&hci_dev_list_lock);
1770
1771 return error;
1772 }
1773 EXPORT_SYMBOL(hci_register_dev);
1774
1775 /* Unregister HCI device */
1776 void hci_unregister_dev(struct hci_dev *hdev)
1777 {
1778 int i, id;
1779
1780 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1781
1782 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1783
1784 id = hdev->id;
1785
1786 write_lock(&hci_dev_list_lock);
1787 list_del(&hdev->list);
1788 write_unlock(&hci_dev_list_lock);
1789
1790 hci_dev_do_close(hdev);
1791
1792 for (i = 0; i < NUM_REASSEMBLY; i++)
1793 kfree_skb(hdev->reassembly[i]);
1794
1795 if (!test_bit(HCI_INIT, &hdev->flags) &&
1796 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1797 hci_dev_lock(hdev);
1798 mgmt_index_removed(hdev);
1799 hci_dev_unlock(hdev);
1800 }
1801
1802 /* mgmt_index_removed should take care of emptying the
1803 * pending list */
1804 BUG_ON(!list_empty(&hdev->mgmt_pending));
1805
1806 hci_notify(hdev, HCI_DEV_UNREG);
1807
1808 if (hdev->rfkill) {
1809 rfkill_unregister(hdev->rfkill);
1810 rfkill_destroy(hdev->rfkill);
1811 }
1812
1813 hci_del_sysfs(hdev);
1814
1815 destroy_workqueue(hdev->workqueue);
1816
1817 hci_dev_lock(hdev);
1818 hci_blacklist_clear(hdev);
1819 hci_uuids_clear(hdev);
1820 hci_link_keys_clear(hdev);
1821 hci_smp_ltks_clear(hdev);
1822 hci_remote_oob_data_clear(hdev);
1823 hci_dev_unlock(hdev);
1824
1825 hci_dev_put(hdev);
1826
1827 ida_simple_remove(&hci_index_ida, id);
1828 }
1829 EXPORT_SYMBOL(hci_unregister_dev);
1830
1831 /* Suspend HCI device */
1832 int hci_suspend_dev(struct hci_dev *hdev)
1833 {
1834 hci_notify(hdev, HCI_DEV_SUSPEND);
1835 return 0;
1836 }
1837 EXPORT_SYMBOL(hci_suspend_dev);
1838
1839 /* Resume HCI device */
1840 int hci_resume_dev(struct hci_dev *hdev)
1841 {
1842 hci_notify(hdev, HCI_DEV_RESUME);
1843 return 0;
1844 }
1845 EXPORT_SYMBOL(hci_resume_dev);
1846
1847 /* Receive frame from HCI drivers */
1848 int hci_recv_frame(struct sk_buff *skb)
1849 {
1850 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1851 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1852 && !test_bit(HCI_INIT, &hdev->flags))) {
1853 kfree_skb(skb);
1854 return -ENXIO;
1855 }
1856
1857 /* Incoming skb */
1858 bt_cb(skb)->incoming = 1;
1859
1860 /* Time stamp */
1861 __net_timestamp(skb);
1862
1863 skb_queue_tail(&hdev->rx_q, skb);
1864 queue_work(hdev->workqueue, &hdev->rx_work);
1865
1866 return 0;
1867 }
1868 EXPORT_SYMBOL(hci_recv_frame);
1869
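/* Reassemble a packet from driver-provided fragments. The sk_buff in the
 * given reassembly slot grows until the expected header and payload have
 * been received, at which point the complete frame is handed to
 * hci_recv_frame(). Returns the number of unconsumed bytes, or a
 * negative error.
 */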
1870 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1871 int count, __u8 index)
1872 {
1873 int len = 0;
1874 int hlen = 0;
1875 int remain = count;
1876 struct sk_buff *skb;
1877 struct bt_skb_cb *scb;
1878
1879 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1880 index >= NUM_REASSEMBLY)
1881 return -EILSEQ;
1882
1883 skb = hdev->reassembly[index];
1884
1885 if (!skb) {
1886 switch (type) {
1887 case HCI_ACLDATA_PKT:
1888 len = HCI_MAX_FRAME_SIZE;
1889 hlen = HCI_ACL_HDR_SIZE;
1890 break;
1891 case HCI_EVENT_PKT:
1892 len = HCI_MAX_EVENT_SIZE;
1893 hlen = HCI_EVENT_HDR_SIZE;
1894 break;
1895 case HCI_SCODATA_PKT:
1896 len = HCI_MAX_SCO_SIZE;
1897 hlen = HCI_SCO_HDR_SIZE;
1898 break;
1899 }
1900
1901 skb = bt_skb_alloc(len, GFP_ATOMIC);
1902 if (!skb)
1903 return -ENOMEM;
1904
1905 scb = (void *) skb->cb;
1906 scb->expect = hlen;
1907 scb->pkt_type = type;
1908
1909 skb->dev = (void *) hdev;
1910 hdev->reassembly[index] = skb;
1911 }
1912
1913 while (count) {
1914 scb = (void *) skb->cb;
1915 len = min_t(uint, scb->expect, count);
1916
1917 memcpy(skb_put(skb, len), data, len);
1918
1919 count -= len;
1920 data += len;
1921 scb->expect -= len;
1922 remain = count;
1923
1924 switch (type) {
1925 case HCI_EVENT_PKT:
1926 if (skb->len == HCI_EVENT_HDR_SIZE) {
1927 struct hci_event_hdr *h = hci_event_hdr(skb);
1928 scb->expect = h->plen;
1929
1930 if (skb_tailroom(skb) < scb->expect) {
1931 kfree_skb(skb);
1932 hdev->reassembly[index] = NULL;
1933 return -ENOMEM;
1934 }
1935 }
1936 break;
1937
1938 case HCI_ACLDATA_PKT:
1939 if (skb->len == HCI_ACL_HDR_SIZE) {
1940 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1941 scb->expect = __le16_to_cpu(h->dlen);
1942
1943 if (skb_tailroom(skb) < scb->expect) {
1944 kfree_skb(skb);
1945 hdev->reassembly[index] = NULL;
1946 return -ENOMEM;
1947 }
1948 }
1949 break;
1950
1951 case HCI_SCODATA_PKT:
1952 if (skb->len == HCI_SCO_HDR_SIZE) {
1953 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1954 scb->expect = h->dlen;
1955
1956 if (skb_tailroom(skb) < scb->expect) {
1957 kfree_skb(skb);
1958 hdev->reassembly[index] = NULL;
1959 return -ENOMEM;
1960 }
1961 }
1962 break;
1963 }
1964
1965 if (scb->expect == 0) {
1966 /* Complete frame */
1967
1968 bt_cb(skb)->pkt_type = type;
1969 hci_recv_frame(skb);
1970
1971 hdev->reassembly[index] = NULL;
1972 return remain;
1973 }
1974 }
1975
1976 return remain;
1977 }
1978
1979 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1980 {
1981 int rem = 0;
1982
1983 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1984 return -EILSEQ;
1985
1986 while (count) {
1987 rem = hci_reassembly(hdev, type, data, count, type - 1);
1988 if (rem < 0)
1989 return rem;
1990
1991 data += (count - rem);
1992 count = rem;
1993 }
1994
1995 return rem;
1996 }
1997 EXPORT_SYMBOL(hci_recv_fragment);
1998
1999 #define STREAM_REASSEMBLY 0
2000
2001 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2002 {
2003 int type;
2004 int rem = 0;
2005
2006 while (count) {
2007 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2008
2009 if (!skb) {
2010 struct { char type; } *pkt;
2011
2012 /* Start of the frame */
2013 pkt = data;
2014 type = pkt->type;
2015
2016 data++;
2017 count--;
2018 } else
2019 type = bt_cb(skb)->pkt_type;
2020
2021 rem = hci_reassembly(hdev, type, data, count,
2022 STREAM_REASSEMBLY);
2023 if (rem < 0)
2024 return rem;
2025
2026 data += (count - rem);
2027 count = rem;
2028 }
2029
2030 return rem;
2031 }
2032 EXPORT_SYMBOL(hci_recv_stream_fragment);
2033
2034 /* ---- Interface to upper protocols ---- */
2035
2036 int hci_register_cb(struct hci_cb *cb)
2037 {
2038 BT_DBG("%p name %s", cb, cb->name);
2039
2040 write_lock(&hci_cb_list_lock);
2041 list_add(&cb->list, &hci_cb_list);
2042 write_unlock(&hci_cb_list_lock);
2043
2044 return 0;
2045 }
2046 EXPORT_SYMBOL(hci_register_cb);
2047
2048 int hci_unregister_cb(struct hci_cb *cb)
2049 {
2050 BT_DBG("%p name %s", cb, cb->name);
2051
2052 write_lock(&hci_cb_list_lock);
2053 list_del(&cb->list);
2054 write_unlock(&hci_cb_list_lock);
2055
2056 return 0;
2057 }
2058 EXPORT_SYMBOL(hci_unregister_cb);
2059
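/* Hand a single frame to the driver: timestamp it, mirror it to the
 * monitor socket (and to raw sockets when in promiscuous mode), drop the
 * socket owner and call the driver's send callback.
 */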
2060 static int hci_send_frame(struct sk_buff *skb)
2061 {
2062 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2063
2064 if (!hdev) {
2065 kfree_skb(skb);
2066 return -ENODEV;
2067 }
2068
2069 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2070
2071 /* Time stamp */
2072 __net_timestamp(skb);
2073
2074 /* Send copy to monitor */
2075 hci_send_to_monitor(hdev, skb);
2076
2077 if (atomic_read(&hdev->promisc)) {
2078 /* Send copy to the sockets */
2079 hci_send_to_sock(hdev, skb);
2080 }
2081
2082 /* Get rid of skb owner, prior to sending to the driver. */
2083 skb_orphan(skb);
2084
2085 return hdev->send(skb);
2086 }
2087
2088 /* Send HCI command */
2089 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2090 {
2091 int len = HCI_COMMAND_HDR_SIZE + plen;
2092 struct hci_command_hdr *hdr;
2093 struct sk_buff *skb;
2094
2095 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2096
2097 skb = bt_skb_alloc(len, GFP_ATOMIC);
2098 if (!skb) {
2099 BT_ERR("%s no memory for command", hdev->name);
2100 return -ENOMEM;
2101 }
2102
2103 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2104 hdr->opcode = cpu_to_le16(opcode);
2105 hdr->plen = plen;
2106
2107 if (plen)
2108 memcpy(skb_put(skb, plen), param, plen);
2109
2110 BT_DBG("skb len %d", skb->len);
2111
2112 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2113 skb->dev = (void *) hdev;
2114
2115 if (test_bit(HCI_INIT, &hdev->flags))
2116 hdev->init_last_cmd = opcode;
2117
2118 skb_queue_tail(&hdev->cmd_q, skb);
2119 queue_work(hdev->workqueue, &hdev->cmd_work);
2120
2121 return 0;
2122 }
2123
2124 /* Get data from the previously sent command */
2125 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2126 {
2127 struct hci_command_hdr *hdr;
2128
2129 if (!hdev->sent_cmd)
2130 return NULL;
2131
2132 hdr = (void *) hdev->sent_cmd->data;
2133
2134 if (hdr->opcode != cpu_to_le16(opcode))
2135 return NULL;
2136
2137 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2138
2139 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2140 }
2141
2142 /* Send ACL data */
2143 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2144 {
2145 struct hci_acl_hdr *hdr;
2146 int len = skb->len;
2147
2148 skb_push(skb, HCI_ACL_HDR_SIZE);
2149 skb_reset_transport_header(skb);
2150 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2151 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2152 hdr->dlen = cpu_to_le16(len);
2153 }
2154
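/* Add the ACL header and queue the skb for transmission. A fragmented
 * skb (with a frag_list) is split up: the first fragment keeps the
 * ACL_START flag, the remaining ones are queued atomically with
 * ACL_CONT set.
 */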
2155 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2156 struct sk_buff *skb, __u16 flags)
2157 {
2158 struct hci_dev *hdev = conn->hdev;
2159 struct sk_buff *list;
2160
2161 skb->len = skb_headlen(skb);
2162 skb->data_len = 0;
2163
2164 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2165 hci_add_acl_hdr(skb, conn->handle, flags);
2166
2167 list = skb_shinfo(skb)->frag_list;
2168 if (!list) {
2169 /* Non fragmented */
2170 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2171
2172 skb_queue_tail(queue, skb);
2173 } else {
2174 /* Fragmented */
2175 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2176
2177 skb_shinfo(skb)->frag_list = NULL;
2178
2179 /* Queue all fragments atomically */
2180 spin_lock(&queue->lock);
2181
2182 __skb_queue_tail(queue, skb);
2183
2184 flags &= ~ACL_START;
2185 flags |= ACL_CONT;
2186 do {
2187 skb = list; list = list->next;
2188
2189 skb->dev = (void *) hdev;
2190 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2191 hci_add_acl_hdr(skb, conn->handle, flags);
2192
2193 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2194
2195 __skb_queue_tail(queue, skb);
2196 } while (list);
2197
2198 spin_unlock(&queue->lock);
2199 }
2200 }
2201
2202 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2203 {
2204 struct hci_conn *conn = chan->conn;
2205 struct hci_dev *hdev = conn->hdev;
2206
2207 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2208
2209 skb->dev = (void *) hdev;
2210
2211 hci_queue_acl(conn, &chan->data_q, skb, flags);
2212
2213 queue_work(hdev->workqueue, &hdev->tx_work);
2214 }
2215
2216 /* Send SCO data */
2217 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2218 {
2219 struct hci_dev *hdev = conn->hdev;
2220 struct hci_sco_hdr hdr;
2221
2222 BT_DBG("%s len %d", hdev->name, skb->len);
2223
2224 hdr.handle = cpu_to_le16(conn->handle);
2225 hdr.dlen = skb->len;
2226
2227 skb_push(skb, HCI_SCO_HDR_SIZE);
2228 skb_reset_transport_header(skb);
2229 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2230
2231 skb->dev = (void *) hdev;
2232 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2233
2234 skb_queue_tail(&conn->data_q, skb);
2235 queue_work(hdev->workqueue, &hdev->tx_work);
2236 }
2237
2238 /* ---- HCI TX task (outgoing data) ---- */
2239
2240 /* HCI Connection scheduler */
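/* Pick the connection of the given type with the fewest packets in
 * flight and compute how many packets it may send in this round: the
 * controller's free buffer count divided by the number of ready
 * connections, but at least one.
 */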
2241 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2242 int *quote)
2243 {
2244 struct hci_conn_hash *h = &hdev->conn_hash;
2245 struct hci_conn *conn = NULL, *c;
2246 unsigned int num = 0, min = ~0;
2247
2248 /* We don't have to lock device here. Connections are always
2249 * added and removed with TX task disabled. */
2250
2251 rcu_read_lock();
2252
2253 list_for_each_entry_rcu(c, &h->list, list) {
2254 if (c->type != type || skb_queue_empty(&c->data_q))
2255 continue;
2256
2257 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2258 continue;
2259
2260 num++;
2261
2262 if (c->sent < min) {
2263 min = c->sent;
2264 conn = c;
2265 }
2266
2267 if (hci_conn_num(hdev, type) == num)
2268 break;
2269 }
2270
2271 rcu_read_unlock();
2272
2273 if (conn) {
2274 int cnt, q;
2275
2276 switch (conn->type) {
2277 case ACL_LINK:
2278 cnt = hdev->acl_cnt;
2279 break;
2280 case SCO_LINK:
2281 case ESCO_LINK:
2282 cnt = hdev->sco_cnt;
2283 break;
2284 case LE_LINK:
2285 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2286 break;
2287 default:
2288 cnt = 0;
2289 BT_ERR("Unknown link type");
2290 }
2291
2292 q = cnt / num;
2293 *quote = q ? q : 1;
2294 } else
2295 *quote = 0;
2296
2297 BT_DBG("conn %p quote %d", conn, *quote);
2298 return conn;
2299 }
2300
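/* Called when the controller has not returned buffer credits for longer
 * than the TX timeout: every connection of @type that still has packets
 * in flight is assumed stalled and is disconnected with
 * HCI_ERROR_REMOTE_USER_TERM.
 */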
2301 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2302 {
2303 struct hci_conn_hash *h = &hdev->conn_hash;
2304 struct hci_conn *c;
2305
2306 BT_ERR("%s link tx timeout", hdev->name);
2307
2308 rcu_read_lock();
2309
2310 /* Kill stalled connections */
2311 list_for_each_entry_rcu(c, &h->list, list) {
2312 if (c->type == type && c->sent) {
2313 BT_ERR("%s killing stalled connection %s",
2314 hdev->name, batostr(&c->dst));
2315 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2316 }
2317 }
2318
2319 rcu_read_unlock();
2320 }
2321
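/* Per-channel variant of the connection scheduler, used for ACL and LE
 * traffic. Only channels whose head skb carries the highest priority seen
 * in this pass compete; among those, the channel belonging to the
 * connection with the fewest outstanding packets wins. The quote is
 * derived from the buffer credits exactly as in hci_low_sent().
 */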
2322 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2323 int *quote)
2324 {
2325 struct hci_conn_hash *h = &hdev->conn_hash;
2326 struct hci_chan *chan = NULL;
2327 unsigned int num = 0, min = ~0, cur_prio = 0;
2328 struct hci_conn *conn;
2329 int cnt, q, conn_num = 0;
2330
2331 BT_DBG("%s", hdev->name);
2332
2333 rcu_read_lock();
2334
2335 list_for_each_entry_rcu(conn, &h->list, list) {
2336 struct hci_chan *tmp;
2337
2338 if (conn->type != type)
2339 continue;
2340
2341 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2342 continue;
2343
2344 conn_num++;
2345
2346 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2347 struct sk_buff *skb;
2348
2349 if (skb_queue_empty(&tmp->data_q))
2350 continue;
2351
2352 skb = skb_peek(&tmp->data_q);
2353 if (skb->priority < cur_prio)
2354 continue;
2355
2356 if (skb->priority > cur_prio) {
2357 num = 0;
2358 min = ~0;
2359 cur_prio = skb->priority;
2360 }
2361
2362 num++;
2363
2364 if (conn->sent < min) {
2365 min = conn->sent;
2366 chan = tmp;
2367 }
2368 }
2369
2370 if (hci_conn_num(hdev, type) == conn_num)
2371 break;
2372 }
2373
2374 rcu_read_unlock();
2375
2376 if (!chan)
2377 return NULL;
2378
2379 switch (chan->conn->type) {
2380 case ACL_LINK:
2381 cnt = hdev->acl_cnt;
2382 break;
2383 case SCO_LINK:
2384 case ESCO_LINK:
2385 cnt = hdev->sco_cnt;
2386 break;
2387 case LE_LINK:
2388 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2389 break;
2390 default:
2391 cnt = 0;
2392 BT_ERR("Unknown link type");
2393 }
2394
2395 q = cnt / num;
2396 *quote = q ? q : 1;
2397 BT_DBG("chan %p quote %d", chan, *quote);
2398 return chan;
2399 }
2400
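/* After a scheduling round, channels that transmitted something get their
 * per-round counter cleared, while channels that were skipped have the
 * priority of their head skb promoted to HCI_PRIO_MAX - 1, so lower
 * priority traffic cannot be starved indefinitely by busier channels.
 */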
2401 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2402 {
2403 struct hci_conn_hash *h = &hdev->conn_hash;
2404 struct hci_conn *conn;
2405 int num = 0;
2406
2407 BT_DBG("%s", hdev->name);
2408
2409 rcu_read_lock();
2410
2411 list_for_each_entry_rcu(conn, &h->list, list) {
2412 struct hci_chan *chan;
2413
2414 if (conn->type != type)
2415 continue;
2416
2417 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2418 continue;
2419
2420 num++;
2421
2422 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2423 struct sk_buff *skb;
2424
2425 if (chan->sent) {
2426 chan->sent = 0;
2427 continue;
2428 }
2429
2430 if (skb_queue_empty(&chan->data_q))
2431 continue;
2432
2433 skb = skb_peek(&chan->data_q);
2434 if (skb->priority >= HCI_PRIO_MAX - 1)
2435 continue;
2436
2437 skb->priority = HCI_PRIO_MAX - 1;
2438
2439 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2440 skb->priority);
2441 }
2442
2443 if (hci_conn_num(hdev, type) == num)
2444 break;
2445 }
2446
2447 rcu_read_unlock();
2448
2449 }
2450
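/* Block based flow control: __get_blocks() converts an ACL frame into the
 * number of controller data blocks it occupies, rounding up. For example,
 * with block_len = 64 a frame carrying a 300 byte payload (skb->len minus
 * the 4 byte ACL header) needs DIV_ROUND_UP(300, 64) = 5 blocks.
 */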
2451 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2452 {
2453 /* Calculate count of blocks used by this packet */
2454 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2455 }
2456
2457 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2458 {
2459 if (!test_bit(HCI_RAW, &hdev->flags)) {
2460 /* ACL tx timeout must be longer than maximum
2461 * link supervision timeout (40.9 seconds) */
2462 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2463 HCI_ACL_TX_TIMEOUT))
2464 hci_link_tx_to(hdev, ACL_LINK);
2465 }
2466 }
2467
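/* Packet based ACL scheduler: while credits remain and hci_chan_sent()
 * finds a runnable channel, send up to 'quote' frames from it. The head
 * priority is sampled once per burst and the burst is cut short as soon
 * as a lower priority frame reaches the head of the queue. Each frame
 * consumes one acl_cnt credit; credits are handed back when the
 * controller later reports completed packets.
 */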
2468 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2469 {
2470 unsigned int cnt = hdev->acl_cnt;
2471 struct hci_chan *chan;
2472 struct sk_buff *skb;
2473 int quote;
2474
2475 __check_timeout(hdev, cnt);
2476
2477 while (hdev->acl_cnt &&
2478 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2479 u32 priority = (skb_peek(&chan->data_q))->priority;
2480 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2481 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2482 skb->len, skb->priority);
2483
2484 /* Stop if priority has changed */
2485 if (skb->priority < priority)
2486 break;
2487
2488 skb = skb_dequeue(&chan->data_q);
2489
2490 hci_conn_enter_active_mode(chan->conn,
2491 bt_cb(skb)->force_active);
2492
2493 hci_send_frame(skb);
2494 hdev->acl_last_tx = jiffies;
2495
2496 hdev->acl_cnt--;
2497 chan->sent++;
2498 chan->conn->sent++;
2499 }
2500 }
2501
2502 if (cnt != hdev->acl_cnt)
2503 hci_prio_recalculate(hdev, ACL_LINK);
2504 }
2505
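/* Block based variant: the budget is hdev->block_cnt, and both the quote
 * and the global budget are decremented by the number of blocks a frame
 * occupies instead of by one per frame. If the frame at hand needs more
 * blocks than are currently free, the pass is aborted.
 */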
2506 static void hci_sched_acl_blk(struct hci_dev *hdev)
2507 {
2508 unsigned int cnt = hdev->block_cnt;
2509 struct hci_chan *chan;
2510 struct sk_buff *skb;
2511 int quote;
2512
2513 __check_timeout(hdev, cnt);
2514
2515 while (hdev->block_cnt > 0 &&
2516 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2517 u32 priority = (skb_peek(&chan->data_q))->priority;
2518 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2519 int blocks;
2520
2521 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2522 skb->len, skb->priority);
2523
2524 /* Stop if priority has changed */
2525 if (skb->priority < priority)
2526 break;
2527
2528 skb = skb_dequeue(&chan->data_q);
2529
2530 blocks = __get_blocks(hdev, skb);
2531 if (blocks > hdev->block_cnt)
2532 return;
2533
2534 hci_conn_enter_active_mode(chan->conn,
2535 bt_cb(skb)->force_active);
2536
2537 hci_send_frame(skb);
2538 hdev->acl_last_tx = jiffies;
2539
2540 hdev->block_cnt -= blocks;
2541 quote -= blocks;
2542
2543 chan->sent += blocks;
2544 chan->conn->sent += blocks;
2545 }
2546 }
2547
2548 if (cnt != hdev->block_cnt)
2549 hci_prio_recalculate(hdev, ACL_LINK);
2550 }
2551
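/* Dispatch to the packet or block based ACL scheduler depending on the
 * flow control mode the controller operates in.
 */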
2552 static void hci_sched_acl(struct hci_dev *hdev)
2553 {
2554 BT_DBG("%s", hdev->name);
2555
2556 if (!hci_conn_num(hdev, ACL_LINK))
2557 return;
2558
2559 switch (hdev->flow_ctl_mode) {
2560 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2561 hci_sched_acl_pkt(hdev);
2562 break;
2563
2564 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2565 hci_sched_acl_blk(hdev);
2566 break;
2567 }
2568 }
2569
2570 /* Schedule SCO */
2571 static void hci_sched_sco(struct hci_dev *hdev)
2572 {
2573 struct hci_conn *conn;
2574 struct sk_buff *skb;
2575 int quote;
2576
2577 BT_DBG("%s", hdev->name);
2578
2579 if (!hci_conn_num(hdev, SCO_LINK))
2580 return;
2581
2582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2584 BT_DBG("skb %p len %d", skb, skb->len);
2585 hci_send_frame(skb);
2586
2587 conn->sent++;
2588 if (conn->sent == ~0)
2589 conn->sent = 0;
2590 }
2591 }
2592 }
2593
2594 static void hci_sched_esco(struct hci_dev *hdev)
2595 {
2596 struct hci_conn *conn;
2597 struct sk_buff *skb;
2598 int quote;
2599
2600 BT_DBG("%s", hdev->name);
2601
2602 if (!hci_conn_num(hdev, ESCO_LINK))
2603 return;
2604
2605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2606 &quote))) {
2607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2608 BT_DBG("skb %p len %d", skb, skb->len);
2609 hci_send_frame(skb);
2610
2611 conn->sent++;
2612 if (conn->sent == ~0)
2613 conn->sent = 0;
2614 }
2615 }
2616 }
2617
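/* LE scheduling mirrors the packet based ACL scheduler. When the
 * controller advertises a separate LE buffer pool (le_pkts != 0) the
 * le_cnt credits are used; otherwise LE traffic shares the ACL credits.
 */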
2618 static void hci_sched_le(struct hci_dev *hdev)
2619 {
2620 struct hci_chan *chan;
2621 struct sk_buff *skb;
2622 int quote, cnt, tmp;
2623
2624 BT_DBG("%s", hdev->name);
2625
2626 if (!hci_conn_num(hdev, LE_LINK))
2627 return;
2628
2629 if (!test_bit(HCI_RAW, &hdev->flags)) {
2630 /* LE tx timeout must be longer than maximum
2631 * link supervision timeout (40.9 seconds) */
2632 if (!hdev->le_cnt && hdev->le_pkts &&
2633 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2634 hci_link_tx_to(hdev, LE_LINK);
2635 }
2636
2637 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2638 tmp = cnt;
2639 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2640 u32 priority = (skb_peek(&chan->data_q))->priority;
2641 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2642 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2643 skb->len, skb->priority);
2644
2645 /* Stop if priority has changed */
2646 if (skb->priority < priority)
2647 break;
2648
2649 skb = skb_dequeue(&chan->data_q);
2650
2651 hci_send_frame(skb);
2652 hdev->le_last_tx = jiffies;
2653
2654 cnt--;
2655 chan->sent++;
2656 chan->conn->sent++;
2657 }
2658 }
2659
2660 if (hdev->le_pkts)
2661 hdev->le_cnt = cnt;
2662 else
2663 hdev->acl_cnt = cnt;
2664
2665 if (cnt != tmp)
2666 hci_prio_recalculate(hdev, LE_LINK);
2667 }
2668
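/* TX work handler: runs the per link type schedulers in turn and finally
 * flushes raw_q, whose frames bypass flow control entirely.
 */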
2669 static void hci_tx_work(struct work_struct *work)
2670 {
2671 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2672 struct sk_buff *skb;
2673
2674 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2675 hdev->sco_cnt, hdev->le_cnt);
2676
2677 	/* Schedule queues and send pending frames to the HCI driver */
2678
2679 hci_sched_acl(hdev);
2680
2681 hci_sched_sco(hdev);
2682
2683 hci_sched_esco(hdev);
2684
2685 hci_sched_le(hdev);
2686
2687 /* Send next queued raw (unknown type) packet */
2688 while ((skb = skb_dequeue(&hdev->raw_q)))
2689 hci_send_frame(skb);
2690 }
2691
2692 /* ----- HCI RX task (incoming data processing) ----- */
2693
2694 /* ACL data packet */
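/* The 16 bit handle field of the ACL header packs the connection handle
 * in the lower 12 bits and the packet boundary/broadcast flags in the
 * upper 4 bits; hci_handle() and hci_flags() split them. For example, a
 * raw value of 0x2001 yields handle 0x001 with flags 0x2 (ACL_START).
 */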
2695 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2696 {
2697 struct hci_acl_hdr *hdr = (void *) skb->data;
2698 struct hci_conn *conn;
2699 __u16 handle, flags;
2700
2701 skb_pull(skb, HCI_ACL_HDR_SIZE);
2702
2703 handle = __le16_to_cpu(hdr->handle);
2704 flags = hci_flags(handle);
2705 handle = hci_handle(handle);
2706
2707 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2708 handle, flags);
2709
2710 hdev->stat.acl_rx++;
2711
2712 hci_dev_lock(hdev);
2713 conn = hci_conn_hash_lookup_handle(hdev, handle);
2714 hci_dev_unlock(hdev);
2715
2716 if (conn) {
2717 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2718
2719 hci_dev_lock(hdev);
2720 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2721 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2722 mgmt_device_connected(hdev, &conn->dst, conn->type,
2723 conn->dst_type, 0, NULL, 0,
2724 conn->dev_class);
2725 hci_dev_unlock(hdev);
2726
2727 /* Send to upper protocol */
2728 l2cap_recv_acldata(conn, skb, flags);
2729 return;
2730 } else {
2731 BT_ERR("%s ACL packet for unknown connection handle %d",
2732 hdev->name, handle);
2733 }
2734
2735 kfree_skb(skb);
2736 }
2737
2738 /* SCO data packet */
2739 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2740 {
2741 struct hci_sco_hdr *hdr = (void *) skb->data;
2742 struct hci_conn *conn;
2743 __u16 handle;
2744
2745 skb_pull(skb, HCI_SCO_HDR_SIZE);
2746
2747 handle = __le16_to_cpu(hdr->handle);
2748
2749 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2750
2751 hdev->stat.sco_rx++;
2752
2753 hci_dev_lock(hdev);
2754 conn = hci_conn_hash_lookup_handle(hdev, handle);
2755 hci_dev_unlock(hdev);
2756
2757 if (conn) {
2758 /* Send to upper protocol */
2759 sco_recv_scodata(conn, skb);
2760 return;
2761 } else {
2762 BT_ERR("%s SCO packet for unknown connection handle %d",
2763 hdev->name, handle);
2764 }
2765
2766 kfree_skb(skb);
2767 }
2768
2769 static void hci_rx_work(struct work_struct *work)
2770 {
2771 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2772 struct sk_buff *skb;
2773
2774 BT_DBG("%s", hdev->name);
2775
2776 while ((skb = skb_dequeue(&hdev->rx_q))) {
2777 /* Send copy to monitor */
2778 hci_send_to_monitor(hdev, skb);
2779
2780 if (atomic_read(&hdev->promisc)) {
2781 /* Send copy to the sockets */
2782 hci_send_to_sock(hdev, skb);
2783 }
2784
2785 if (test_bit(HCI_RAW, &hdev->flags)) {
2786 kfree_skb(skb);
2787 continue;
2788 }
2789
2790 if (test_bit(HCI_INIT, &hdev->flags)) {
2791 			/* Don't process data packets in this state. */
2792 switch (bt_cb(skb)->pkt_type) {
2793 case HCI_ACLDATA_PKT:
2794 case HCI_SCODATA_PKT:
2795 kfree_skb(skb);
2796 continue;
2797 }
2798 }
2799
2800 /* Process frame */
2801 switch (bt_cb(skb)->pkt_type) {
2802 case HCI_EVENT_PKT:
2803 BT_DBG("%s Event packet", hdev->name);
2804 hci_event_packet(hdev, skb);
2805 break;
2806
2807 case HCI_ACLDATA_PKT:
2808 BT_DBG("%s ACL data packet", hdev->name);
2809 hci_acldata_packet(hdev, skb);
2810 break;
2811
2812 case HCI_SCODATA_PKT:
2813 BT_DBG("%s SCO data packet", hdev->name);
2814 hci_scodata_packet(hdev, skb);
2815 break;
2816
2817 default:
2818 kfree_skb(skb);
2819 break;
2820 }
2821 }
2822 }
2823
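/* Command work handler. cmd_cnt holds the controller's command credits
 * (normally a window of one), so a command is only dequeued while a
 * credit is available. The skb handed to hci_send_frame() is consumed by
 * the driver, so a clone is kept in sent_cmd for matching against the
 * completion event, and the command timer is re-armed unless an HCI
 * reset is in flight, in which case it is stopped.
 */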
2824 static void hci_cmd_work(struct work_struct *work)
2825 {
2826 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2827 struct sk_buff *skb;
2828
2829 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2830
2831 /* Send queued commands */
2832 if (atomic_read(&hdev->cmd_cnt)) {
2833 skb = skb_dequeue(&hdev->cmd_q);
2834 if (!skb)
2835 return;
2836
2837 kfree_skb(hdev->sent_cmd);
2838
2839 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2840 if (hdev->sent_cmd) {
2841 atomic_dec(&hdev->cmd_cnt);
2842 hci_send_frame(skb);
2843 if (test_bit(HCI_RESET, &hdev->flags))
2844 del_timer(&hdev->cmd_timer);
2845 else
2846 mod_timer(&hdev->cmd_timer,
2847 jiffies + HCI_CMD_TIMEOUT);
2848 } else {
2849 skb_queue_head(&hdev->cmd_q, skb);
2850 queue_work(hdev->workqueue, &hdev->cmd_work);
2851 }
2852 }
2853 }
2854
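/* Start a general inquiry. The LAP below is the General Inquiry Access
 * Code 0x9e8b33 in little-endian byte order, and per the HCI
 * specification the length parameter is expressed in units of 1.28
 * seconds.
 */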
2855 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2856 {
2857 /* General inquiry access code (GIAC) */
2858 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2859 struct hci_cp_inquiry cp;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (test_bit(HCI_INQUIRY, &hdev->flags))
2864 return -EINPROGRESS;
2865
2866 inquiry_cache_flush(hdev);
2867
2868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = length;
2871
2872 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873 }
2874
2875 int hci_cancel_inquiry(struct hci_dev *hdev)
2876 {
2877 BT_DBG("%s", hdev->name);
2878
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2880 return -EALREADY;
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883 }
2884
2885 u8 bdaddr_to_le(u8 bdaddr_type)
2886 {
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2890
2891 default:
2892 		/* Fall back to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;
2894 }
2895 }