Bluetooth: Fix __hci_req_sync() handling of empty requests
net/bluetooth/hci_core.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63
64 /* If this is the init phase, check if the completed command matches
65 * the last init command; if it doesn't, just return (or resend the
66 * last command for the CSR reset quirk handled below). */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96 }
97
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107 }
108
109 /* Execute request and wait for completion. */
110 static int __hci_req_sync(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
113 {
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125
126 /* If the request didn't send any commands return immediately */
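/* (An empty cmd_q together with a non-zero cmd_cnt means that no
 * command is queued and none is still awaiting completion.)
 */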
127 if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
128 hdev->req_status = 0;
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130 return err;
131 }
132
133 schedule_timeout(timeout);
134
135 remove_wait_queue(&hdev->req_wait_q, &wait);
136
137 if (signal_pending(current))
138 return -EINTR;
139
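/* Map the request outcome to an errno: a completed request carries a
 * controller status code, a cancelled one already carries an errno,
 * and anything else means the wait timed out.
 */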
140 switch (hdev->req_status) {
141 case HCI_REQ_DONE:
142 err = -bt_to_errno(hdev->req_result);
143 break;
144
145 case HCI_REQ_CANCELED:
146 err = -hdev->req_result;
147 break;
148
149 default:
150 err = -ETIMEDOUT;
151 break;
152 }
153
154 hdev->req_status = hdev->req_result = 0;
155
156 BT_DBG("%s end: err %d", hdev->name, err);
157
158 return err;
159 }
160
161 static int hci_req_sync(struct hci_dev *hdev,
162 void (*req)(struct hci_dev *hdev, unsigned long opt),
163 unsigned long opt, __u32 timeout)
164 {
165 int ret;
166
167 if (!test_bit(HCI_UP, &hdev->flags))
168 return -ENETDOWN;
169
170 /* Serialize all requests */
171 hci_req_lock(hdev);
172 ret = __hci_req_sync(hdev, req, opt, timeout);
173 hci_req_unlock(hdev);
174
175 return ret;
176 }
177
178 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
179 {
180 BT_DBG("%s %ld", hdev->name, opt);
181
182 /* Reset device */
183 set_bit(HCI_RESET, &hdev->flags);
184 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
185 }
186
187 static void bredr_init(struct hci_dev *hdev)
188 {
189 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
190
191 /* Read Local Supported Features */
192 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
193
194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196 }
197
198 static void amp_init(struct hci_dev *hdev)
199 {
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
201
202 /* Read Local Version */
203 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204
205 /* Read Local AMP Info */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
207
208 /* Read Data Blk size */
209 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
210 }
211
212 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
213 {
214 struct sk_buff *skb;
215
216 BT_DBG("%s %ld", hdev->name, opt);
217
218 /* Driver initialization */
219
220 /* Special commands */
221 while ((skb = skb_dequeue(&hdev->driver_init))) {
222 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
223 skb->dev = (void *) hdev;
224
225 skb_queue_tail(&hdev->cmd_q, skb);
226 queue_work(hdev->workqueue, &hdev->cmd_work);
227 }
228 skb_queue_purge(&hdev->driver_init);
229
230 /* Reset */
231 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
232 hci_reset_req(hdev, 0);
233
234 switch (hdev->dev_type) {
235 case HCI_BREDR:
236 bredr_init(hdev);
237 break;
238
239 case HCI_AMP:
240 amp_init(hdev);
241 break;
242
243 default:
244 BT_ERR("Unknown device type %d", hdev->dev_type);
245 break;
246 }
247 }
248
249 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
250 {
251 __u8 scan = opt;
252
253 BT_DBG("%s %x", hdev->name, scan);
254
255 /* Inquiry and Page scans */
256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
257 }
258
259 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
260 {
261 __u8 auth = opt;
262
263 BT_DBG("%s %x", hdev->name, auth);
264
265 /* Authentication */
266 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
267 }
268
269 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
270 {
271 __u8 encrypt = opt;
272
273 BT_DBG("%s %x", hdev->name, encrypt);
274
275 /* Encryption */
276 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
277 }
278
279 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
280 {
281 __le16 policy = cpu_to_le16(opt);
282
283 BT_DBG("%s %x", hdev->name, policy);
284
285 /* Default link policy */
286 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
287 }
288
289 /* Get HCI device by index.
290 * Device is held on return. */
291 struct hci_dev *hci_dev_get(int index)
292 {
293 struct hci_dev *hdev = NULL, *d;
294
295 BT_DBG("%d", index);
296
297 if (index < 0)
298 return NULL;
299
300 read_lock(&hci_dev_list_lock);
301 list_for_each_entry(d, &hci_dev_list, list) {
302 if (d->id == index) {
303 hdev = hci_dev_hold(d);
304 break;
305 }
306 }
307 read_unlock(&hci_dev_list_lock);
308 return hdev;
309 }
310
311 /* ---- Inquiry support ---- */
312
313 bool hci_discovery_active(struct hci_dev *hdev)
314 {
315 struct discovery_state *discov = &hdev->discovery;
316
317 switch (discov->state) {
318 case DISCOVERY_FINDING:
319 case DISCOVERY_RESOLVING:
320 return true;
321
322 default:
323 return false;
324 }
325 }
326
327 void hci_discovery_set_state(struct hci_dev *hdev, int state)
328 {
329 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
330
331 if (hdev->discovery.state == state)
332 return;
333
334 switch (state) {
335 case DISCOVERY_STOPPED:
336 if (hdev->discovery.state != DISCOVERY_STARTING)
337 mgmt_discovering(hdev, 0);
338 break;
339 case DISCOVERY_STARTING:
340 break;
341 case DISCOVERY_FINDING:
342 mgmt_discovering(hdev, 1);
343 break;
344 case DISCOVERY_RESOLVING:
345 break;
346 case DISCOVERY_STOPPING:
347 break;
348 }
349
350 hdev->discovery.state = state;
351 }
352
353 static void inquiry_cache_flush(struct hci_dev *hdev)
354 {
355 struct discovery_state *cache = &hdev->discovery;
356 struct inquiry_entry *p, *n;
357
358 list_for_each_entry_safe(p, n, &cache->all, all) {
359 list_del(&p->all);
360 kfree(p);
361 }
362
363 INIT_LIST_HEAD(&cache->unknown);
364 INIT_LIST_HEAD(&cache->resolve);
365 }
366
367 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
368 bdaddr_t *bdaddr)
369 {
370 struct discovery_state *cache = &hdev->discovery;
371 struct inquiry_entry *e;
372
373 BT_DBG("cache %p, %pMR", cache, bdaddr);
374
375 list_for_each_entry(e, &cache->all, all) {
376 if (!bacmp(&e->data.bdaddr, bdaddr))
377 return e;
378 }
379
380 return NULL;
381 }
382
383 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
384 bdaddr_t *bdaddr)
385 {
386 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *e;
388
389 BT_DBG("cache %p, %pMR", cache, bdaddr);
390
391 list_for_each_entry(e, &cache->unknown, list) {
392 if (!bacmp(&e->data.bdaddr, bdaddr))
393 return e;
394 }
395
396 return NULL;
397 }
398
399 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
400 bdaddr_t *bdaddr,
401 int state)
402 {
403 struct discovery_state *cache = &hdev->discovery;
404 struct inquiry_entry *e;
405
406 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
407
408 list_for_each_entry(e, &cache->resolve, list) {
409 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
410 return e;
411 if (!bacmp(&e->data.bdaddr, bdaddr))
412 return e;
413 }
414
415 return NULL;
416 }
417
418 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
419 struct inquiry_entry *ie)
420 {
421 struct discovery_state *cache = &hdev->discovery;
422 struct list_head *pos = &cache->resolve;
423 struct inquiry_entry *p;
424
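/* Re-insert the entry so the resolve list stays ordered by signal
 * strength (smallest |RSSI| first), so the strongest devices tend to
 * get their names resolved first; entries whose name request is
 * already pending are not displaced.
 */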
425 list_del(&ie->list);
426
427 list_for_each_entry(p, &cache->resolve, list) {
428 if (p->name_state != NAME_PENDING &&
429 abs(p->data.rssi) >= abs(ie->data.rssi))
430 break;
431 pos = &p->list;
432 }
433
434 list_add(&ie->list, pos);
435 }
436
437 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
438 bool name_known, bool *ssp)
439 {
440 struct discovery_state *cache = &hdev->discovery;
441 struct inquiry_entry *ie;
442
443 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
444
445 hci_remove_remote_oob_data(hdev, &data->bdaddr);
446
447 if (ssp)
448 *ssp = data->ssp_mode;
449
450 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
451 if (ie) {
452 if (ie->data.ssp_mode && ssp)
453 *ssp = true;
454
455 if (ie->name_state == NAME_NEEDED &&
456 data->rssi != ie->data.rssi) {
457 ie->data.rssi = data->rssi;
458 hci_inquiry_cache_update_resolve(hdev, ie);
459 }
460
461 goto update;
462 }
463
464 /* Entry not in the cache. Add new one. */
465 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
466 if (!ie)
467 return false;
468
469 list_add(&ie->all, &cache->all);
470
471 if (name_known) {
472 ie->name_state = NAME_KNOWN;
473 } else {
474 ie->name_state = NAME_NOT_KNOWN;
475 list_add(&ie->list, &cache->unknown);
476 }
477
478 update:
479 if (name_known && ie->name_state != NAME_KNOWN &&
480 ie->name_state != NAME_PENDING) {
481 ie->name_state = NAME_KNOWN;
482 list_del(&ie->list);
483 }
484
485 memcpy(&ie->data, data, sizeof(*data));
486 ie->timestamp = jiffies;
487 cache->timestamp = jiffies;
488
489 if (ie->name_state == NAME_NOT_KNOWN)
490 return false;
491
492 return true;
493 }
494
495 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
496 {
497 struct discovery_state *cache = &hdev->discovery;
498 struct inquiry_info *info = (struct inquiry_info *) buf;
499 struct inquiry_entry *e;
500 int copied = 0;
501
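/* Copy at most num entries from the inquiry cache into the caller's
 * buffer of struct inquiry_info records.
 */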
502 list_for_each_entry(e, &cache->all, all) {
503 struct inquiry_data *data = &e->data;
504
505 if (copied >= num)
506 break;
507
508 bacpy(&info->bdaddr, &data->bdaddr);
509 info->pscan_rep_mode = data->pscan_rep_mode;
510 info->pscan_period_mode = data->pscan_period_mode;
511 info->pscan_mode = data->pscan_mode;
512 memcpy(info->dev_class, data->dev_class, 3);
513 info->clock_offset = data->clock_offset;
514
515 info++;
516 copied++;
517 }
518
519 BT_DBG("cache %p, copied %d", cache, copied);
520 return copied;
521 }
522
523 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
524 {
525 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
526 struct hci_cp_inquiry cp;
527
528 BT_DBG("%s", hdev->name);
529
530 if (test_bit(HCI_INQUIRY, &hdev->flags))
531 return;
532
533 /* Start Inquiry */
534 memcpy(&cp.lap, &ir->lap, 3);
535 cp.length = ir->length;
536 cp.num_rsp = ir->num_rsp;
537 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
538 }
539
540 int hci_inquiry(void __user *arg)
541 {
542 __u8 __user *ptr = arg;
543 struct hci_inquiry_req ir;
544 struct hci_dev *hdev;
545 int err = 0, do_inquiry = 0, max_rsp;
546 long timeo;
547 __u8 *buf;
548
549 if (copy_from_user(&ir, ptr, sizeof(ir)))
550 return -EFAULT;
551
552 hdev = hci_dev_get(ir.dev_id);
553 if (!hdev)
554 return -ENODEV;
555
556 hci_dev_lock(hdev);
557 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
558 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
559 inquiry_cache_flush(hdev);
560 do_inquiry = 1;
561 }
562 hci_dev_unlock(hdev);
563
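/* ir.length is expressed in units of 1.28 seconds (HCI Inquiry_Length);
 * allowing 2 seconds per unit gives a conservative request timeout.
 */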
564 timeo = ir.length * msecs_to_jiffies(2000);
565
566 if (do_inquiry) {
567 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
568 timeo);
569 if (err < 0)
570 goto done;
571 }
572
573 /* For an unlimited number of responses, use a buffer with
574 * 255 entries.
575 */
576 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
577
578 /* cache_dump can't sleep, so allocate a temporary buffer and then
579 * copy it to user space.
580 */
581 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
582 if (!buf) {
583 err = -ENOMEM;
584 goto done;
585 }
586
587 hci_dev_lock(hdev);
588 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
589 hci_dev_unlock(hdev);
590
591 BT_DBG("num_rsp %d", ir.num_rsp);
592
593 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
594 ptr += sizeof(ir);
595 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
596 ir.num_rsp))
597 err = -EFAULT;
598 } else
599 err = -EFAULT;
600
601 kfree(buf);
602
603 done:
604 hci_dev_put(hdev);
605 return err;
606 }
607
608 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
609 {
610 u8 ad_len = 0, flags = 0;
611 size_t name_len;
612
613 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
614 flags |= LE_AD_GENERAL;
615
616 if (!lmp_bredr_capable(hdev))
617 flags |= LE_AD_NO_BREDR;
618
619 if (lmp_le_br_capable(hdev))
620 flags |= LE_AD_SIM_LE_BREDR_CTRL;
621
622 if (lmp_host_le_br_capable(hdev))
623 flags |= LE_AD_SIM_LE_BREDR_HOST;
624
625 if (flags) {
626 BT_DBG("adv flags 0x%02x", flags);
627
628 ptr[0] = 2;
629 ptr[1] = EIR_FLAGS;
630 ptr[2] = flags;
631
632 ad_len += 3;
633 ptr += 3;
634 }
635
636 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
637 ptr[0] = 2;
638 ptr[1] = EIR_TX_POWER;
639 ptr[2] = (u8) hdev->adv_tx_power;
640
641 ad_len += 3;
642 ptr += 3;
643 }
644
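/* Append the local name, using the shortened-name EIR type when the
 * complete name does not fit in the remaining advertising data.
 */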
645 name_len = strlen(hdev->dev_name);
646 if (name_len > 0) {
647 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
648
649 if (name_len > max_len) {
650 name_len = max_len;
651 ptr[1] = EIR_NAME_SHORT;
652 } else
653 ptr[1] = EIR_NAME_COMPLETE;
654
655 ptr[0] = name_len + 1;
656
657 memcpy(ptr + 2, hdev->dev_name, name_len);
658
659 ad_len += (name_len + 2);
660 ptr += (name_len + 2);
661 }
662
663 return ad_len;
664 }
665
666 int hci_update_ad(struct hci_dev *hdev)
667 {
668 struct hci_cp_le_set_adv_data cp;
669 u8 len;
670 int err;
671
672 hci_dev_lock(hdev);
673
674 if (!lmp_le_capable(hdev)) {
675 err = -EINVAL;
676 goto unlock;
677 }
678
679 memset(&cp, 0, sizeof(cp));
680
681 len = create_ad(hdev, cp.data);
682
683 if (hdev->adv_data_len == len &&
684 memcmp(cp.data, hdev->adv_data, len) == 0) {
685 err = 0;
686 goto unlock;
687 }
688
689 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
690 hdev->adv_data_len = len;
691
692 cp.length = len;
693 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
694
695 unlock:
696 hci_dev_unlock(hdev);
697
698 return err;
699 }
700
701 /* ---- HCI ioctl helpers ---- */
702
703 int hci_dev_open(__u16 dev)
704 {
705 struct hci_dev *hdev;
706 int ret = 0;
707
708 hdev = hci_dev_get(dev);
709 if (!hdev)
710 return -ENODEV;
711
712 BT_DBG("%s %p", hdev->name, hdev);
713
714 hci_req_lock(hdev);
715
716 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
717 ret = -ENODEV;
718 goto done;
719 }
720
721 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
722 ret = -ERFKILL;
723 goto done;
724 }
725
726 if (test_bit(HCI_UP, &hdev->flags)) {
727 ret = -EALREADY;
728 goto done;
729 }
730
731 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
732 set_bit(HCI_RAW, &hdev->flags);
733
734 /* Treat all non-BR/EDR controllers as raw devices if
735 enable_hs is not set */
736 if (hdev->dev_type != HCI_BREDR && !enable_hs)
737 set_bit(HCI_RAW, &hdev->flags);
738
739 if (hdev->open(hdev)) {
740 ret = -EIO;
741 goto done;
742 }
743
744 if (!test_bit(HCI_RAW, &hdev->flags)) {
745 atomic_set(&hdev->cmd_cnt, 1);
746 set_bit(HCI_INIT, &hdev->flags);
747 hdev->init_last_cmd = 0;
748
749 ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
750
751 clear_bit(HCI_INIT, &hdev->flags);
752 }
753
754 if (!ret) {
755 hci_dev_hold(hdev);
756 set_bit(HCI_UP, &hdev->flags);
757 hci_notify(hdev, HCI_DEV_UP);
758 hci_update_ad(hdev);
759 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
760 mgmt_valid_hdev(hdev)) {
761 hci_dev_lock(hdev);
762 mgmt_powered(hdev, 1);
763 hci_dev_unlock(hdev);
764 }
765 } else {
766 /* Init failed, cleanup */
767 flush_work(&hdev->tx_work);
768 flush_work(&hdev->cmd_work);
769 flush_work(&hdev->rx_work);
770
771 skb_queue_purge(&hdev->cmd_q);
772 skb_queue_purge(&hdev->rx_q);
773
774 if (hdev->flush)
775 hdev->flush(hdev);
776
777 if (hdev->sent_cmd) {
778 kfree_skb(hdev->sent_cmd);
779 hdev->sent_cmd = NULL;
780 }
781
782 hdev->close(hdev);
783 hdev->flags = 0;
784 }
785
786 done:
787 hci_req_unlock(hdev);
788 hci_dev_put(hdev);
789 return ret;
790 }
791
792 static int hci_dev_do_close(struct hci_dev *hdev)
793 {
794 BT_DBG("%s %p", hdev->name, hdev);
795
796 cancel_work_sync(&hdev->le_scan);
797
798 cancel_delayed_work(&hdev->power_off);
799
800 hci_req_cancel(hdev, ENODEV);
801 hci_req_lock(hdev);
802
803 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
804 del_timer_sync(&hdev->cmd_timer);
805 hci_req_unlock(hdev);
806 return 0;
807 }
808
809 /* Flush RX and TX works */
810 flush_work(&hdev->tx_work);
811 flush_work(&hdev->rx_work);
812
813 if (hdev->discov_timeout > 0) {
814 cancel_delayed_work(&hdev->discov_off);
815 hdev->discov_timeout = 0;
816 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
817 }
818
819 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
820 cancel_delayed_work(&hdev->service_cache);
821
822 cancel_delayed_work_sync(&hdev->le_scan_disable);
823
824 hci_dev_lock(hdev);
825 inquiry_cache_flush(hdev);
826 hci_conn_hash_flush(hdev);
827 hci_dev_unlock(hdev);
828
829 hci_notify(hdev, HCI_DEV_DOWN);
830
831 if (hdev->flush)
832 hdev->flush(hdev);
833
834 /* Reset device */
835 skb_queue_purge(&hdev->cmd_q);
836 atomic_set(&hdev->cmd_cnt, 1);
837 if (!test_bit(HCI_RAW, &hdev->flags) &&
838 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
839 set_bit(HCI_INIT, &hdev->flags);
840 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
841 clear_bit(HCI_INIT, &hdev->flags);
842 }
843
844 /* flush cmd work */
845 flush_work(&hdev->cmd_work);
846
847 /* Drop queues */
848 skb_queue_purge(&hdev->rx_q);
849 skb_queue_purge(&hdev->cmd_q);
850 skb_queue_purge(&hdev->raw_q);
851
852 /* Drop last sent command */
853 if (hdev->sent_cmd) {
854 del_timer_sync(&hdev->cmd_timer);
855 kfree_skb(hdev->sent_cmd);
856 hdev->sent_cmd = NULL;
857 }
858
859 /* After this point our queues are empty
860 * and no tasks are scheduled. */
861 hdev->close(hdev);
862
863 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
864 mgmt_valid_hdev(hdev)) {
865 hci_dev_lock(hdev);
866 mgmt_powered(hdev, 0);
867 hci_dev_unlock(hdev);
868 }
869
870 /* Clear flags */
871 hdev->flags = 0;
872
873 /* Controller radio is available but is currently powered down */
874 hdev->amp_status = 0;
875
876 memset(hdev->eir, 0, sizeof(hdev->eir));
877 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
878
879 hci_req_unlock(hdev);
880
881 hci_dev_put(hdev);
882 return 0;
883 }
884
885 int hci_dev_close(__u16 dev)
886 {
887 struct hci_dev *hdev;
888 int err;
889
890 hdev = hci_dev_get(dev);
891 if (!hdev)
892 return -ENODEV;
893
894 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
895 cancel_delayed_work(&hdev->power_off);
896
897 err = hci_dev_do_close(hdev);
898
899 hci_dev_put(hdev);
900 return err;
901 }
902
903 int hci_dev_reset(__u16 dev)
904 {
905 struct hci_dev *hdev;
906 int ret = 0;
907
908 hdev = hci_dev_get(dev);
909 if (!hdev)
910 return -ENODEV;
911
912 hci_req_lock(hdev);
913
914 if (!test_bit(HCI_UP, &hdev->flags))
915 goto done;
916
917 /* Drop queues */
918 skb_queue_purge(&hdev->rx_q);
919 skb_queue_purge(&hdev->cmd_q);
920
921 hci_dev_lock(hdev);
922 inquiry_cache_flush(hdev);
923 hci_conn_hash_flush(hdev);
924 hci_dev_unlock(hdev);
925
926 if (hdev->flush)
927 hdev->flush(hdev);
928
929 atomic_set(&hdev->cmd_cnt, 1);
930 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
931
932 if (!test_bit(HCI_RAW, &hdev->flags))
933 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
934
935 done:
936 hci_req_unlock(hdev);
937 hci_dev_put(hdev);
938 return ret;
939 }
940
941 int hci_dev_reset_stat(__u16 dev)
942 {
943 struct hci_dev *hdev;
944 int ret = 0;
945
946 hdev = hci_dev_get(dev);
947 if (!hdev)
948 return -ENODEV;
949
950 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
951
952 hci_dev_put(hdev);
953
954 return ret;
955 }
956
957 int hci_dev_cmd(unsigned int cmd, void __user *arg)
958 {
959 struct hci_dev *hdev;
960 struct hci_dev_req dr;
961 int err = 0;
962
963 if (copy_from_user(&dr, arg, sizeof(dr)))
964 return -EFAULT;
965
966 hdev = hci_dev_get(dr.dev_id);
967 if (!hdev)
968 return -ENODEV;
969
970 switch (cmd) {
971 case HCISETAUTH:
972 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
973 HCI_INIT_TIMEOUT);
974 break;
975
976 case HCISETENCRYPT:
977 if (!lmp_encrypt_capable(hdev)) {
978 err = -EOPNOTSUPP;
979 break;
980 }
981
982 if (!test_bit(HCI_AUTH, &hdev->flags)) {
983 /* Auth must be enabled first */
984 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
985 HCI_INIT_TIMEOUT);
986 if (err)
987 break;
988 }
989
990 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
991 HCI_INIT_TIMEOUT);
992 break;
993
994 case HCISETSCAN:
995 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
996 HCI_INIT_TIMEOUT);
997 break;
998
999 case HCISETLINKPOL:
1000 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1001 HCI_INIT_TIMEOUT);
1002 break;
1003
1004 case HCISETLINKMODE:
1005 hdev->link_mode = ((__u16) dr.dev_opt) &
1006 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1007 break;
1008
1009 case HCISETPTYPE:
1010 hdev->pkt_type = (__u16) dr.dev_opt;
1011 break;
1012
1013 case HCISETACLMTU:
1014 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1015 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1016 break;
1017
1018 case HCISETSCOMTU:
1019 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1020 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1021 break;
1022
1023 default:
1024 err = -EINVAL;
1025 break;
1026 }
1027
1028 hci_dev_put(hdev);
1029 return err;
1030 }
1031
1032 int hci_get_dev_list(void __user *arg)
1033 {
1034 struct hci_dev *hdev;
1035 struct hci_dev_list_req *dl;
1036 struct hci_dev_req *dr;
1037 int n = 0, size, err;
1038 __u16 dev_num;
1039
1040 if (get_user(dev_num, (__u16 __user *) arg))
1041 return -EFAULT;
1042
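/* Cap the number of requested devices so that the allocation below
 * stays within two pages.
 */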
1043 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1044 return -EINVAL;
1045
1046 size = sizeof(*dl) + dev_num * sizeof(*dr);
1047
1048 dl = kzalloc(size, GFP_KERNEL);
1049 if (!dl)
1050 return -ENOMEM;
1051
1052 dr = dl->dev_req;
1053
1054 read_lock(&hci_dev_list_lock);
1055 list_for_each_entry(hdev, &hci_dev_list, list) {
1056 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1057 cancel_delayed_work(&hdev->power_off);
1058
1059 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1060 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1061
1062 (dr + n)->dev_id = hdev->id;
1063 (dr + n)->dev_opt = hdev->flags;
1064
1065 if (++n >= dev_num)
1066 break;
1067 }
1068 read_unlock(&hci_dev_list_lock);
1069
1070 dl->dev_num = n;
1071 size = sizeof(*dl) + n * sizeof(*dr);
1072
1073 err = copy_to_user(arg, dl, size);
1074 kfree(dl);
1075
1076 return err ? -EFAULT : 0;
1077 }
1078
1079 int hci_get_dev_info(void __user *arg)
1080 {
1081 struct hci_dev *hdev;
1082 struct hci_dev_info di;
1083 int err = 0;
1084
1085 if (copy_from_user(&di, arg, sizeof(di)))
1086 return -EFAULT;
1087
1088 hdev = hci_dev_get(di.dev_id);
1089 if (!hdev)
1090 return -ENODEV;
1091
1092 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1093 cancel_delayed_work_sync(&hdev->power_off);
1094
1095 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1096 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1097
1098 strcpy(di.name, hdev->name);
1099 di.bdaddr = hdev->bdaddr;
1100 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1101 di.flags = hdev->flags;
1102 di.pkt_type = hdev->pkt_type;
1103 if (lmp_bredr_capable(hdev)) {
1104 di.acl_mtu = hdev->acl_mtu;
1105 di.acl_pkts = hdev->acl_pkts;
1106 di.sco_mtu = hdev->sco_mtu;
1107 di.sco_pkts = hdev->sco_pkts;
1108 } else {
1109 di.acl_mtu = hdev->le_mtu;
1110 di.acl_pkts = hdev->le_pkts;
1111 di.sco_mtu = 0;
1112 di.sco_pkts = 0;
1113 }
1114 di.link_policy = hdev->link_policy;
1115 di.link_mode = hdev->link_mode;
1116
1117 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1118 memcpy(&di.features, &hdev->features, sizeof(di.features));
1119
1120 if (copy_to_user(arg, &di, sizeof(di)))
1121 err = -EFAULT;
1122
1123 hci_dev_put(hdev);
1124
1125 return err;
1126 }
1127
1128 /* ---- Interface to HCI drivers ---- */
1129
1130 static int hci_rfkill_set_block(void *data, bool blocked)
1131 {
1132 struct hci_dev *hdev = data;
1133
1134 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1135
1136 if (!blocked)
1137 return 0;
1138
1139 hci_dev_do_close(hdev);
1140
1141 return 0;
1142 }
1143
1144 static const struct rfkill_ops hci_rfkill_ops = {
1145 .set_block = hci_rfkill_set_block,
1146 };
1147
1148 static void hci_power_on(struct work_struct *work)
1149 {
1150 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1151
1152 BT_DBG("%s", hdev->name);
1153
1154 if (hci_dev_open(hdev->id) < 0)
1155 return;
1156
1157 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1158 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1159 HCI_AUTO_OFF_TIMEOUT);
1160
1161 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1162 mgmt_index_added(hdev);
1163 }
1164
1165 static void hci_power_off(struct work_struct *work)
1166 {
1167 struct hci_dev *hdev = container_of(work, struct hci_dev,
1168 power_off.work);
1169
1170 BT_DBG("%s", hdev->name);
1171
1172 hci_dev_do_close(hdev);
1173 }
1174
1175 static void hci_discov_off(struct work_struct *work)
1176 {
1177 struct hci_dev *hdev;
1178 u8 scan = SCAN_PAGE;
1179
1180 hdev = container_of(work, struct hci_dev, discov_off.work);
1181
1182 BT_DBG("%s", hdev->name);
1183
1184 hci_dev_lock(hdev);
1185
1186 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1187
1188 hdev->discov_timeout = 0;
1189
1190 hci_dev_unlock(hdev);
1191 }
1192
1193 int hci_uuids_clear(struct hci_dev *hdev)
1194 {
1195 struct bt_uuid *uuid, *tmp;
1196
1197 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1198 list_del(&uuid->list);
1199 kfree(uuid);
1200 }
1201
1202 return 0;
1203 }
1204
1205 int hci_link_keys_clear(struct hci_dev *hdev)
1206 {
1207 struct list_head *p, *n;
1208
1209 list_for_each_safe(p, n, &hdev->link_keys) {
1210 struct link_key *key;
1211
1212 key = list_entry(p, struct link_key, list);
1213
1214 list_del(p);
1215 kfree(key);
1216 }
1217
1218 return 0;
1219 }
1220
1221 int hci_smp_ltks_clear(struct hci_dev *hdev)
1222 {
1223 struct smp_ltk *k, *tmp;
1224
1225 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1226 list_del(&k->list);
1227 kfree(k);
1228 }
1229
1230 return 0;
1231 }
1232
1233 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234 {
1235 struct link_key *k;
1236
1237 list_for_each_entry(k, &hdev->link_keys, list)
1238 if (bacmp(bdaddr, &k->bdaddr) == 0)
1239 return k;
1240
1241 return NULL;
1242 }
1243
1244 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1245 u8 key_type, u8 old_key_type)
1246 {
1247 /* Legacy key */
1248 if (key_type < 0x03)
1249 return true;
1250
1251 /* Debug keys are insecure so don't store them persistently */
1252 if (key_type == HCI_LK_DEBUG_COMBINATION)
1253 return false;
1254
1255 /* Changed combination key and there's no previous one */
1256 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1257 return false;
1258
1259 /* Security mode 3 case */
1260 if (!conn)
1261 return true;
1262
1263 /* Neither side asked for no-bonding, i.e. both sides want some form of bonding */
1264 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1265 return true;
1266
1267 /* Local side had dedicated bonding as requirement */
1268 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1269 return true;
1270
1271 /* Remote side had dedicated bonding as requirement */
1272 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1273 return true;
1274
1275 /* If none of the above criteria match, then don't store the key
1276 * persistently */
1277 return false;
1278 }
1279
1280 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1281 {
1282 struct smp_ltk *k;
1283
1284 list_for_each_entry(k, &hdev->long_term_keys, list) {
1285 if (k->ediv != ediv ||
1286 memcmp(rand, k->rand, sizeof(k->rand)))
1287 continue;
1288
1289 return k;
1290 }
1291
1292 return NULL;
1293 }
1294
1295 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1296 u8 addr_type)
1297 {
1298 struct smp_ltk *k;
1299
1300 list_for_each_entry(k, &hdev->long_term_keys, list)
1301 if (addr_type == k->bdaddr_type &&
1302 bacmp(bdaddr, &k->bdaddr) == 0)
1303 return k;
1304
1305 return NULL;
1306 }
1307
1308 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1309 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1310 {
1311 struct link_key *key, *old_key;
1312 u8 old_key_type;
1313 bool persistent;
1314
1315 old_key = hci_find_link_key(hdev, bdaddr);
1316 if (old_key) {
1317 old_key_type = old_key->type;
1318 key = old_key;
1319 } else {
1320 old_key_type = conn ? conn->key_type : 0xff;
1321 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1322 if (!key)
1323 return -ENOMEM;
1324 list_add(&key->list, &hdev->link_keys);
1325 }
1326
1327 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1328
1329 /* Some buggy controller combinations generate a changed
1330 * combination key for legacy pairing even when there's no
1331 * previous key */
1332 if (type == HCI_LK_CHANGED_COMBINATION &&
1333 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1334 type = HCI_LK_COMBINATION;
1335 if (conn)
1336 conn->key_type = type;
1337 }
1338
1339 bacpy(&key->bdaddr, bdaddr);
1340 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1341 key->pin_len = pin_len;
1342
1343 if (type == HCI_LK_CHANGED_COMBINATION)
1344 key->type = old_key_type;
1345 else
1346 key->type = type;
1347
1348 if (!new_key)
1349 return 0;
1350
1351 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1352
1353 mgmt_new_link_key(hdev, key, persistent);
1354
1355 if (conn)
1356 conn->flush_key = !persistent;
1357
1358 return 0;
1359 }
1360
1361 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1362 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1363 ediv, u8 rand[8])
1364 {
1365 struct smp_ltk *key, *old_key;
1366
1367 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1368 return 0;
1369
1370 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1371 if (old_key)
1372 key = old_key;
1373 else {
1374 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1375 if (!key)
1376 return -ENOMEM;
1377 list_add(&key->list, &hdev->long_term_keys);
1378 }
1379
1380 bacpy(&key->bdaddr, bdaddr);
1381 key->bdaddr_type = addr_type;
1382 memcpy(key->val, tk, sizeof(key->val));
1383 key->authenticated = authenticated;
1384 key->ediv = ediv;
1385 key->enc_size = enc_size;
1386 key->type = type;
1387 memcpy(key->rand, rand, sizeof(key->rand));
1388
1389 if (!new_key)
1390 return 0;
1391
1392 if (type & HCI_SMP_LTK)
1393 mgmt_new_ltk(hdev, key, 1);
1394
1395 return 0;
1396 }
1397
1398 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399 {
1400 struct link_key *key;
1401
1402 key = hci_find_link_key(hdev, bdaddr);
1403 if (!key)
1404 return -ENOENT;
1405
1406 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1407
1408 list_del(&key->list);
1409 kfree(key);
1410
1411 return 0;
1412 }
1413
1414 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1415 {
1416 struct smp_ltk *k, *tmp;
1417
1418 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1419 if (bacmp(bdaddr, &k->bdaddr))
1420 continue;
1421
1422 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1423
1424 list_del(&k->list);
1425 kfree(k);
1426 }
1427
1428 return 0;
1429 }
1430
1431 /* HCI command timer function */
1432 static void hci_cmd_timeout(unsigned long arg)
1433 {
1434 struct hci_dev *hdev = (void *) arg;
1435
1436 if (hdev->sent_cmd) {
1437 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1438 u16 opcode = __le16_to_cpu(sent->opcode);
1439
1440 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1441 } else {
1442 BT_ERR("%s command tx timeout", hdev->name);
1443 }
1444
1445 atomic_set(&hdev->cmd_cnt, 1);
1446 queue_work(hdev->workqueue, &hdev->cmd_work);
1447 }
1448
1449 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1450 bdaddr_t *bdaddr)
1451 {
1452 struct oob_data *data;
1453
1454 list_for_each_entry(data, &hdev->remote_oob_data, list)
1455 if (bacmp(bdaddr, &data->bdaddr) == 0)
1456 return data;
1457
1458 return NULL;
1459 }
1460
1461 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1462 {
1463 struct oob_data *data;
1464
1465 data = hci_find_remote_oob_data(hdev, bdaddr);
1466 if (!data)
1467 return -ENOENT;
1468
1469 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1470
1471 list_del(&data->list);
1472 kfree(data);
1473
1474 return 0;
1475 }
1476
1477 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1478 {
1479 struct oob_data *data, *n;
1480
1481 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1482 list_del(&data->list);
1483 kfree(data);
1484 }
1485
1486 return 0;
1487 }
1488
1489 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1490 u8 *randomizer)
1491 {
1492 struct oob_data *data;
1493
1494 data = hci_find_remote_oob_data(hdev, bdaddr);
1495
1496 if (!data) {
1497 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1498 if (!data)
1499 return -ENOMEM;
1500
1501 bacpy(&data->bdaddr, bdaddr);
1502 list_add(&data->list, &hdev->remote_oob_data);
1503 }
1504
1505 memcpy(data->hash, hash, sizeof(data->hash));
1506 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1507
1508 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1509
1510 return 0;
1511 }
1512
1513 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1514 {
1515 struct bdaddr_list *b;
1516
1517 list_for_each_entry(b, &hdev->blacklist, list)
1518 if (bacmp(bdaddr, &b->bdaddr) == 0)
1519 return b;
1520
1521 return NULL;
1522 }
1523
1524 int hci_blacklist_clear(struct hci_dev *hdev)
1525 {
1526 struct list_head *p, *n;
1527
1528 list_for_each_safe(p, n, &hdev->blacklist) {
1529 struct bdaddr_list *b;
1530
1531 b = list_entry(p, struct bdaddr_list, list);
1532
1533 list_del(p);
1534 kfree(b);
1535 }
1536
1537 return 0;
1538 }
1539
1540 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1541 {
1542 struct bdaddr_list *entry;
1543
1544 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1545 return -EBADF;
1546
1547 if (hci_blacklist_lookup(hdev, bdaddr))
1548 return -EEXIST;
1549
1550 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1551 if (!entry)
1552 return -ENOMEM;
1553
1554 bacpy(&entry->bdaddr, bdaddr);
1555
1556 list_add(&entry->list, &hdev->blacklist);
1557
1558 return mgmt_device_blocked(hdev, bdaddr, type);
1559 }
1560
1561 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1562 {
1563 struct bdaddr_list *entry;
1564
1565 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1566 return hci_blacklist_clear(hdev);
1567
1568 entry = hci_blacklist_lookup(hdev, bdaddr);
1569 if (!entry)
1570 return -ENOENT;
1571
1572 list_del(&entry->list);
1573 kfree(entry);
1574
1575 return mgmt_device_unblocked(hdev, bdaddr, type);
1576 }
1577
1578 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1579 {
1580 struct le_scan_params *param = (struct le_scan_params *) opt;
1581 struct hci_cp_le_set_scan_param cp;
1582
1583 memset(&cp, 0, sizeof(cp));
1584 cp.type = param->type;
1585 cp.interval = cpu_to_le16(param->interval);
1586 cp.window = cpu_to_le16(param->window);
1587
1588 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1589 }
1590
1591 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1592 {
1593 struct hci_cp_le_set_scan_enable cp;
1594
1595 memset(&cp, 0, sizeof(cp));
1596 cp.enable = 1;
1597 cp.filter_dup = 1;
1598
1599 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1600 }
1601
1602 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1603 u16 window, int timeout)
1604 {
1605 long timeo = msecs_to_jiffies(3000);
1606 struct le_scan_params param;
1607 int err;
1608
1609 BT_DBG("%s", hdev->name);
1610
1611 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1612 return -EINPROGRESS;
1613
1614 param.type = type;
1615 param.interval = interval;
1616 param.window = window;
1617
1618 hci_req_lock(hdev);
1619
1620 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1621 timeo);
1622 if (!err)
1623 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1624
1625 hci_req_unlock(hdev);
1626
1627 if (err < 0)
1628 return err;
1629
1630 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1631 msecs_to_jiffies(timeout));
1632
1633 return 0;
1634 }
1635
1636 int hci_cancel_le_scan(struct hci_dev *hdev)
1637 {
1638 BT_DBG("%s", hdev->name);
1639
1640 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1641 return -EALREADY;
1642
1643 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1644 struct hci_cp_le_set_scan_enable cp;
1645
1646 /* Send HCI command to disable LE Scan */
1647 memset(&cp, 0, sizeof(cp));
1648 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1649 }
1650
1651 return 0;
1652 }
1653
1654 static void le_scan_disable_work(struct work_struct *work)
1655 {
1656 struct hci_dev *hdev = container_of(work, struct hci_dev,
1657 le_scan_disable.work);
1658 struct hci_cp_le_set_scan_enable cp;
1659
1660 BT_DBG("%s", hdev->name);
1661
1662 memset(&cp, 0, sizeof(cp));
1663
1664 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1665 }
1666
1667 static void le_scan_work(struct work_struct *work)
1668 {
1669 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1670 struct le_scan_params *param = &hdev->le_scan_params;
1671
1672 BT_DBG("%s", hdev->name);
1673
1674 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1675 param->timeout);
1676 }
1677
1678 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1679 int timeout)
1680 {
1681 struct le_scan_params *param = &hdev->le_scan_params;
1682
1683 BT_DBG("%s", hdev->name);
1684
1685 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1686 return -ENOTSUPP;
1687
1688 if (work_busy(&hdev->le_scan))
1689 return -EINPROGRESS;
1690
1691 param->type = type;
1692 param->interval = interval;
1693 param->window = window;
1694 param->timeout = timeout;
1695
1696 queue_work(system_long_wq, &hdev->le_scan);
1697
1698 return 0;
1699 }
1700
1701 /* Alloc HCI device */
1702 struct hci_dev *hci_alloc_dev(void)
1703 {
1704 struct hci_dev *hdev;
1705
1706 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1707 if (!hdev)
1708 return NULL;
1709
1710 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1711 hdev->esco_type = (ESCO_HV1);
1712 hdev->link_mode = (HCI_LM_ACCEPT);
1713 hdev->io_capability = 0x03; /* No Input No Output */
1714 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1715 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1716
1717 hdev->sniff_max_interval = 800;
1718 hdev->sniff_min_interval = 80;
1719
1720 mutex_init(&hdev->lock);
1721 mutex_init(&hdev->req_lock);
1722
1723 INIT_LIST_HEAD(&hdev->mgmt_pending);
1724 INIT_LIST_HEAD(&hdev->blacklist);
1725 INIT_LIST_HEAD(&hdev->uuids);
1726 INIT_LIST_HEAD(&hdev->link_keys);
1727 INIT_LIST_HEAD(&hdev->long_term_keys);
1728 INIT_LIST_HEAD(&hdev->remote_oob_data);
1729 INIT_LIST_HEAD(&hdev->conn_hash.list);
1730
1731 INIT_WORK(&hdev->rx_work, hci_rx_work);
1732 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1733 INIT_WORK(&hdev->tx_work, hci_tx_work);
1734 INIT_WORK(&hdev->power_on, hci_power_on);
1735 INIT_WORK(&hdev->le_scan, le_scan_work);
1736
1737 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1738 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1739 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1740
1741 skb_queue_head_init(&hdev->driver_init);
1742 skb_queue_head_init(&hdev->rx_q);
1743 skb_queue_head_init(&hdev->cmd_q);
1744 skb_queue_head_init(&hdev->raw_q);
1745
1746 init_waitqueue_head(&hdev->req_wait_q);
1747
1748 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1749
1750 hci_init_sysfs(hdev);
1751 discovery_init(hdev);
1752
1753 return hdev;
1754 }
1755 EXPORT_SYMBOL(hci_alloc_dev);
1756
1757 /* Free HCI device */
1758 void hci_free_dev(struct hci_dev *hdev)
1759 {
1760 skb_queue_purge(&hdev->driver_init);
1761
1762 /* will free via device release */
1763 put_device(&hdev->dev);
1764 }
1765 EXPORT_SYMBOL(hci_free_dev);
1766
1767 /* Register HCI device */
1768 int hci_register_dev(struct hci_dev *hdev)
1769 {
1770 int id, error;
1771
1772 if (!hdev->open || !hdev->close)
1773 return -EINVAL;
1774
1775 /* Do not allow HCI_AMP devices to register at index 0,
1776 * so the index can be used as the AMP controller ID.
1777 */
1778 switch (hdev->dev_type) {
1779 case HCI_BREDR:
1780 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1781 break;
1782 case HCI_AMP:
1783 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1784 break;
1785 default:
1786 return -EINVAL;
1787 }
1788
1789 if (id < 0)
1790 return id;
1791
1792 sprintf(hdev->name, "hci%d", id);
1793 hdev->id = id;
1794
1795 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1796
1797 write_lock(&hci_dev_list_lock);
1798 list_add(&hdev->list, &hci_dev_list);
1799 write_unlock(&hci_dev_list_lock);
1800
1801 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1802 WQ_MEM_RECLAIM, 1);
1803 if (!hdev->workqueue) {
1804 error = -ENOMEM;
1805 goto err;
1806 }
1807
1808 hdev->req_workqueue = alloc_workqueue(hdev->name,
1809 WQ_HIGHPRI | WQ_UNBOUND |
1810 WQ_MEM_RECLAIM, 1);
1811 if (!hdev->req_workqueue) {
1812 destroy_workqueue(hdev->workqueue);
1813 error = -ENOMEM;
1814 goto err;
1815 }
1816
1817 error = hci_add_sysfs(hdev);
1818 if (error < 0)
1819 goto err_wqueue;
1820
1821 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1822 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1823 hdev);
1824 if (hdev->rfkill) {
1825 if (rfkill_register(hdev->rfkill) < 0) {
1826 rfkill_destroy(hdev->rfkill);
1827 hdev->rfkill = NULL;
1828 }
1829 }
1830
1831 set_bit(HCI_SETUP, &hdev->dev_flags);
1832
1833 if (hdev->dev_type != HCI_AMP)
1834 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1835
1836 hci_notify(hdev, HCI_DEV_REG);
1837 hci_dev_hold(hdev);
1838
1839 queue_work(hdev->req_workqueue, &hdev->power_on);
1840
1841 return id;
1842
1843 err_wqueue:
1844 destroy_workqueue(hdev->workqueue);
1845 destroy_workqueue(hdev->req_workqueue);
1846 err:
1847 ida_simple_remove(&hci_index_ida, hdev->id);
1848 write_lock(&hci_dev_list_lock);
1849 list_del(&hdev->list);
1850 write_unlock(&hci_dev_list_lock);
1851
1852 return error;
1853 }
1854 EXPORT_SYMBOL(hci_register_dev);
1855
1856 /* Unregister HCI device */
1857 void hci_unregister_dev(struct hci_dev *hdev)
1858 {
1859 int i, id;
1860
1861 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1862
1863 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1864
1865 id = hdev->id;
1866
1867 write_lock(&hci_dev_list_lock);
1868 list_del(&hdev->list);
1869 write_unlock(&hci_dev_list_lock);
1870
1871 hci_dev_do_close(hdev);
1872
1873 for (i = 0; i < NUM_REASSEMBLY; i++)
1874 kfree_skb(hdev->reassembly[i]);
1875
1876 cancel_work_sync(&hdev->power_on);
1877
1878 if (!test_bit(HCI_INIT, &hdev->flags) &&
1879 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1880 hci_dev_lock(hdev);
1881 mgmt_index_removed(hdev);
1882 hci_dev_unlock(hdev);
1883 }
1884
1885 /* mgmt_index_removed should take care of emptying the
1886 * pending list */
1887 BUG_ON(!list_empty(&hdev->mgmt_pending));
1888
1889 hci_notify(hdev, HCI_DEV_UNREG);
1890
1891 if (hdev->rfkill) {
1892 rfkill_unregister(hdev->rfkill);
1893 rfkill_destroy(hdev->rfkill);
1894 }
1895
1896 hci_del_sysfs(hdev);
1897
1898 destroy_workqueue(hdev->workqueue);
1899 destroy_workqueue(hdev->req_workqueue);
1900
1901 hci_dev_lock(hdev);
1902 hci_blacklist_clear(hdev);
1903 hci_uuids_clear(hdev);
1904 hci_link_keys_clear(hdev);
1905 hci_smp_ltks_clear(hdev);
1906 hci_remote_oob_data_clear(hdev);
1907 hci_dev_unlock(hdev);
1908
1909 hci_dev_put(hdev);
1910
1911 ida_simple_remove(&hci_index_ida, id);
1912 }
1913 EXPORT_SYMBOL(hci_unregister_dev);
1914
1915 /* Suspend HCI device */
1916 int hci_suspend_dev(struct hci_dev *hdev)
1917 {
1918 hci_notify(hdev, HCI_DEV_SUSPEND);
1919 return 0;
1920 }
1921 EXPORT_SYMBOL(hci_suspend_dev);
1922
1923 /* Resume HCI device */
1924 int hci_resume_dev(struct hci_dev *hdev)
1925 {
1926 hci_notify(hdev, HCI_DEV_RESUME);
1927 return 0;
1928 }
1929 EXPORT_SYMBOL(hci_resume_dev);
1930
1931 /* Receive frame from HCI drivers */
1932 int hci_recv_frame(struct sk_buff *skb)
1933 {
1934 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1935 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1936 && !test_bit(HCI_INIT, &hdev->flags))) {
1937 kfree_skb(skb);
1938 return -ENXIO;
1939 }
1940
1941 /* Incoming skb */
1942 bt_cb(skb)->incoming = 1;
1943
1944 /* Time stamp */
1945 __net_timestamp(skb);
1946
1947 skb_queue_tail(&hdev->rx_q, skb);
1948 queue_work(hdev->workqueue, &hdev->rx_work);
1949
1950 return 0;
1951 }
1952 EXPORT_SYMBOL(hci_recv_frame);
1953
1954 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1955 int count, __u8 index)
1956 {
1957 int len = 0;
1958 int hlen = 0;
1959 int remain = count;
1960 struct sk_buff *skb;
1961 struct bt_skb_cb *scb;
1962
1963 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1964 index >= NUM_REASSEMBLY)
1965 return -EILSEQ;
1966
1967 skb = hdev->reassembly[index];
1968
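/* No reassembly in progress for this slot: allocate a buffer sized
 * for the largest packet of this type and expect the header first.
 */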
1969 if (!skb) {
1970 switch (type) {
1971 case HCI_ACLDATA_PKT:
1972 len = HCI_MAX_FRAME_SIZE;
1973 hlen = HCI_ACL_HDR_SIZE;
1974 break;
1975 case HCI_EVENT_PKT:
1976 len = HCI_MAX_EVENT_SIZE;
1977 hlen = HCI_EVENT_HDR_SIZE;
1978 break;
1979 case HCI_SCODATA_PKT:
1980 len = HCI_MAX_SCO_SIZE;
1981 hlen = HCI_SCO_HDR_SIZE;
1982 break;
1983 }
1984
1985 skb = bt_skb_alloc(len, GFP_ATOMIC);
1986 if (!skb)
1987 return -ENOMEM;
1988
1989 scb = (void *) skb->cb;
1990 scb->expect = hlen;
1991 scb->pkt_type = type;
1992
1993 skb->dev = (void *) hdev;
1994 hdev->reassembly[index] = skb;
1995 }
1996
1997 while (count) {
1998 scb = (void *) skb->cb;
1999 len = min_t(uint, scb->expect, count);
2000
2001 memcpy(skb_put(skb, len), data, len);
2002
2003 count -= len;
2004 data += len;
2005 scb->expect -= len;
2006 remain = count;
2007
2008 switch (type) {
2009 case HCI_EVENT_PKT:
2010 if (skb->len == HCI_EVENT_HDR_SIZE) {
2011 struct hci_event_hdr *h = hci_event_hdr(skb);
2012 scb->expect = h->plen;
2013
2014 if (skb_tailroom(skb) < scb->expect) {
2015 kfree_skb(skb);
2016 hdev->reassembly[index] = NULL;
2017 return -ENOMEM;
2018 }
2019 }
2020 break;
2021
2022 case HCI_ACLDATA_PKT:
2023 if (skb->len == HCI_ACL_HDR_SIZE) {
2024 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2025 scb->expect = __le16_to_cpu(h->dlen);
2026
2027 if (skb_tailroom(skb) < scb->expect) {
2028 kfree_skb(skb);
2029 hdev->reassembly[index] = NULL;
2030 return -ENOMEM;
2031 }
2032 }
2033 break;
2034
2035 case HCI_SCODATA_PKT:
2036 if (skb->len == HCI_SCO_HDR_SIZE) {
2037 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2038 scb->expect = h->dlen;
2039
2040 if (skb_tailroom(skb) < scb->expect) {
2041 kfree_skb(skb);
2042 hdev->reassembly[index] = NULL;
2043 return -ENOMEM;
2044 }
2045 }
2046 break;
2047 }
2048
2049 if (scb->expect == 0) {
2050 /* Complete frame */
2051
2052 bt_cb(skb)->pkt_type = type;
2053 hci_recv_frame(skb);
2054
2055 hdev->reassembly[index] = NULL;
2056 return remain;
2057 }
2058 }
2059
2060 return remain;
2061 }
2062
2063 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2064 {
2065 int rem = 0;
2066
2067 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2068 return -EILSEQ;
2069
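/* One reassembly slot per packet type: type - 1 maps ACL (0x02),
 * SCO (0x03) and Event (0x04) packets to slots 1..3, leaving slot 0
 * for stream reassembly (STREAM_REASSEMBLY below).
 */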
2070 while (count) {
2071 rem = hci_reassembly(hdev, type, data, count, type - 1);
2072 if (rem < 0)
2073 return rem;
2074
2075 data += (count - rem);
2076 count = rem;
2077 }
2078
2079 return rem;
2080 }
2081 EXPORT_SYMBOL(hci_recv_fragment);
2082
2083 #define STREAM_REASSEMBLY 0
2084
2085 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2086 {
2087 int type;
2088 int rem = 0;
2089
2090 while (count) {
2091 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2092
2093 if (!skb) {
2094 struct { char type; } *pkt;
2095
2096 /* Start of the frame */
2097 pkt = data;
2098 type = pkt->type;
2099
2100 data++;
2101 count--;
2102 } else
2103 type = bt_cb(skb)->pkt_type;
2104
2105 rem = hci_reassembly(hdev, type, data, count,
2106 STREAM_REASSEMBLY);
2107 if (rem < 0)
2108 return rem;
2109
2110 data += (count - rem);
2111 count = rem;
2112 }
2113
2114 return rem;
2115 }
2116 EXPORT_SYMBOL(hci_recv_stream_fragment);
2117
2118 /* ---- Interface to upper protocols ---- */
2119
2120 int hci_register_cb(struct hci_cb *cb)
2121 {
2122 BT_DBG("%p name %s", cb, cb->name);
2123
2124 write_lock(&hci_cb_list_lock);
2125 list_add(&cb->list, &hci_cb_list);
2126 write_unlock(&hci_cb_list_lock);
2127
2128 return 0;
2129 }
2130 EXPORT_SYMBOL(hci_register_cb);
2131
2132 int hci_unregister_cb(struct hci_cb *cb)
2133 {
2134 BT_DBG("%p name %s", cb, cb->name);
2135
2136 write_lock(&hci_cb_list_lock);
2137 list_del(&cb->list);
2138 write_unlock(&hci_cb_list_lock);
2139
2140 return 0;
2141 }
2142 EXPORT_SYMBOL(hci_unregister_cb);
2143
2144 static int hci_send_frame(struct sk_buff *skb)
2145 {
2146 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2147
2148 if (!hdev) {
2149 kfree_skb(skb);
2150 return -ENODEV;
2151 }
2152
2153 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2154
2155 /* Time stamp */
2156 __net_timestamp(skb);
2157
2158 /* Send copy to monitor */
2159 hci_send_to_monitor(hdev, skb);
2160
2161 if (atomic_read(&hdev->promisc)) {
2162 /* Send copy to the sockets */
2163 hci_send_to_sock(hdev, skb);
2164 }
2165
2166 /* Get rid of skb owner, prior to sending to the driver. */
2167 skb_orphan(skb);
2168
2169 return hdev->send(skb);
2170 }
2171
2172 /* Send HCI command */
2173 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2174 {
2175 int len = HCI_COMMAND_HDR_SIZE + plen;
2176 struct hci_command_hdr *hdr;
2177 struct sk_buff *skb;
2178
2179 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2180
2181 skb = bt_skb_alloc(len, GFP_ATOMIC);
2182 if (!skb) {
2183 BT_ERR("%s no memory for command", hdev->name);
2184 return -ENOMEM;
2185 }
2186
2187 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2188 hdr->opcode = cpu_to_le16(opcode);
2189 hdr->plen = plen;
2190
2191 if (plen)
2192 memcpy(skb_put(skb, plen), param, plen);
2193
2194 BT_DBG("skb len %d", skb->len);
2195
2196 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2197 skb->dev = (void *) hdev;
2198
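/* While in the init phase, remember the last command sent so that
 * hci_req_complete() can tell when the init sequence has finished.
 */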
2199 if (test_bit(HCI_INIT, &hdev->flags))
2200 hdev->init_last_cmd = opcode;
2201
2202 skb_queue_tail(&hdev->cmd_q, skb);
2203 queue_work(hdev->workqueue, &hdev->cmd_work);
2204
2205 return 0;
2206 }
2207
2208 /* Get data from the previously sent command */
2209 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2210 {
2211 struct hci_command_hdr *hdr;
2212
2213 if (!hdev->sent_cmd)
2214 return NULL;
2215
2216 hdr = (void *) hdev->sent_cmd->data;
2217
2218 if (hdr->opcode != cpu_to_le16(opcode))
2219 return NULL;
2220
2221 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2222
2223 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2224 }
2225
2226 /* Send ACL data */
2227 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2228 {
2229 struct hci_acl_hdr *hdr;
2230 int len = skb->len;
2231
2232 skb_push(skb, HCI_ACL_HDR_SIZE);
2233 skb_reset_transport_header(skb);
2234 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2235 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2236 hdr->dlen = cpu_to_le16(len);
2237 }
2238
2239 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2240 struct sk_buff *skb, __u16 flags)
2241 {
2242 struct hci_conn *conn = chan->conn;
2243 struct hci_dev *hdev = conn->hdev;
2244 struct sk_buff *list;
2245
2246 skb->len = skb_headlen(skb);
2247 skb->data_len = 0;
2248
2249 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2250
2251 switch (hdev->dev_type) {
2252 case HCI_BREDR:
2253 hci_add_acl_hdr(skb, conn->handle, flags);
2254 break;
2255 case HCI_AMP:
2256 hci_add_acl_hdr(skb, chan->handle, flags);
2257 break;
2258 default:
2259 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2260 return;
2261 }
2262
2263 list = skb_shinfo(skb)->frag_list;
2264 if (!list) {
2265 /* Non fragmented */
2266 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2267
2268 skb_queue_tail(queue, skb);
2269 } else {
2270 /* Fragmented */
2271 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2272
2273 skb_shinfo(skb)->frag_list = NULL;
2274
2275 /* Queue all fragments atomically */
2276 spin_lock(&queue->lock);
2277
2278 __skb_queue_tail(queue, skb);
2279
2280 flags &= ~ACL_START;
2281 flags |= ACL_CONT;
2282 do {
2283 skb = list; list = list->next;
2284
2285 skb->dev = (void *) hdev;
2286 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2287 hci_add_acl_hdr(skb, conn->handle, flags);
2288
2289 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2290
2291 __skb_queue_tail(queue, skb);
2292 } while (list);
2293
2294 spin_unlock(&queue->lock);
2295 }
2296 }
2297
2298 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2299 {
2300 struct hci_dev *hdev = chan->conn->hdev;
2301
2302 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2303
2304 skb->dev = (void *) hdev;
2305
2306 hci_queue_acl(chan, &chan->data_q, skb, flags);
2307
2308 queue_work(hdev->workqueue, &hdev->tx_work);
2309 }
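
/* Illustrative call path (the caller lives outside this file): L2CAP
 * builds an ACL frame, picks the hci_chan of the link and submits it
 * roughly as
 *
 *	hci_send_acl(chan, skb, ACL_START);
 *
 * which only queues the frame on chan->data_q; actual transmission is
 * paced by hci_tx_work() according to the controller's buffer counts.
 */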
2310
2311 /* Send SCO data */
2312 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2313 {
2314 struct hci_dev *hdev = conn->hdev;
2315 struct hci_sco_hdr hdr;
2316
2317 BT_DBG("%s len %d", hdev->name, skb->len);
2318
2319 hdr.handle = cpu_to_le16(conn->handle);
2320 hdr.dlen = skb->len;
2321
2322 skb_push(skb, HCI_SCO_HDR_SIZE);
2323 skb_reset_transport_header(skb);
2324 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2325
2326 skb->dev = (void *) hdev;
2327 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2328
2329 skb_queue_tail(&conn->data_q, skb);
2330 queue_work(hdev->workqueue, &hdev->tx_work);
2331 }
2332
2333 /* ---- HCI TX task (outgoing data) ---- */
2334
2335 /* HCI Connection scheduler */
2336 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2337 int *quote)
2338 {
2339 struct hci_conn_hash *h = &hdev->conn_hash;
2340 struct hci_conn *conn = NULL, *c;
2341 unsigned int num = 0, min = ~0;
2342
2343 /* We don't have to lock the device here. Connections are always
2344 * added and removed with the TX task disabled. */
2345
2346 rcu_read_lock();
2347
2348 list_for_each_entry_rcu(c, &h->list, list) {
2349 if (c->type != type || skb_queue_empty(&c->data_q))
2350 continue;
2351
2352 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2353 continue;
2354
2355 num++;
2356
2357 if (c->sent < min) {
2358 min = c->sent;
2359 conn = c;
2360 }
2361
2362 if (hci_conn_num(hdev, type) == num)
2363 break;
2364 }
2365
2366 rcu_read_unlock();
2367
2368 if (conn) {
2369 int cnt, q;
2370
2371 switch (conn->type) {
2372 case ACL_LINK:
2373 cnt = hdev->acl_cnt;
2374 break;
2375 case SCO_LINK:
2376 case ESCO_LINK:
2377 cnt = hdev->sco_cnt;
2378 break;
2379 case LE_LINK:
2380 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2381 break;
2382 default:
2383 cnt = 0;
2384 BT_ERR("Unknown link type");
2385 }
2386
2387 q = cnt / num;
2388 *quote = q ? q : 1;
2389 } else
2390 *quote = 0;
2391
2392 BT_DBG("conn %p quote %d", conn, *quote);
2393 return conn;
2394 }
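
/* Scheduling note: hci_low_sent() picks, among the connections of the
 * requested type that have queued data, the one with the fewest packets
 * in flight (lowest c->sent).  The quote handed back is that link's fair
 * share of the remaining controller credits (cnt / num), with a minimum
 * of one so a busy link is never starved completely.
 */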
2395
2396 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2397 {
2398 struct hci_conn_hash *h = &hdev->conn_hash;
2399 struct hci_conn *c;
2400
2401 BT_ERR("%s link tx timeout", hdev->name);
2402
2403 rcu_read_lock();
2404
2405 /* Kill stalled connections */
2406 list_for_each_entry_rcu(c, &h->list, list) {
2407 if (c->type == type && c->sent) {
2408 BT_ERR("%s killing stalled connection %pMR",
2409 hdev->name, &c->dst);
2410 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2411 }
2412 }
2413
2414 rcu_read_unlock();
2415 }
2416
2417 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2418 int *quote)
2419 {
2420 struct hci_conn_hash *h = &hdev->conn_hash;
2421 struct hci_chan *chan = NULL;
2422 unsigned int num = 0, min = ~0, cur_prio = 0;
2423 struct hci_conn *conn;
2424 int cnt, q, conn_num = 0;
2425
2426 BT_DBG("%s", hdev->name);
2427
2428 rcu_read_lock();
2429
2430 list_for_each_entry_rcu(conn, &h->list, list) {
2431 struct hci_chan *tmp;
2432
2433 if (conn->type != type)
2434 continue;
2435
2436 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2437 continue;
2438
2439 conn_num++;
2440
2441 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2442 struct sk_buff *skb;
2443
2444 if (skb_queue_empty(&tmp->data_q))
2445 continue;
2446
2447 skb = skb_peek(&tmp->data_q);
2448 if (skb->priority < cur_prio)
2449 continue;
2450
2451 if (skb->priority > cur_prio) {
2452 num = 0;
2453 min = ~0;
2454 cur_prio = skb->priority;
2455 }
2456
2457 num++;
2458
2459 if (conn->sent < min) {
2460 min = conn->sent;
2461 chan = tmp;
2462 }
2463 }
2464
2465 if (hci_conn_num(hdev, type) == conn_num)
2466 break;
2467 }
2468
2469 rcu_read_unlock();
2470
2471 if (!chan)
2472 return NULL;
2473
2474 switch (chan->conn->type) {
2475 case ACL_LINK:
2476 cnt = hdev->acl_cnt;
2477 break;
2478 case AMP_LINK:
2479 cnt = hdev->block_cnt;
2480 break;
2481 case SCO_LINK:
2482 case ESCO_LINK:
2483 cnt = hdev->sco_cnt;
2484 break;
2485 case LE_LINK:
2486 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2487 break;
2488 default:
2489 cnt = 0;
2490 BT_ERR("Unknown link type");
2491 }
2492
2493 q = cnt / num;
2494 *quote = q ? q : 1;
2495 BT_DBG("chan %p quote %d", chan, *quote);
2496 return chan;
2497 }
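
/* Channel selection works in two stages: only channels whose head skb
 * carries the highest pending priority compete at all, and among those
 * the channel on the connection with the fewest packets in flight wins.
 * The quote is again the per-winner share of the relevant credit pool
 * (ACL packets, AMP blocks, SCO slots or LE credits), minimum one.
 */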
2498
2499 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2500 {
2501 struct hci_conn_hash *h = &hdev->conn_hash;
2502 struct hci_conn *conn;
2503 int num = 0;
2504
2505 BT_DBG("%s", hdev->name);
2506
2507 rcu_read_lock();
2508
2509 list_for_each_entry_rcu(conn, &h->list, list) {
2510 struct hci_chan *chan;
2511
2512 if (conn->type != type)
2513 continue;
2514
2515 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2516 continue;
2517
2518 num++;
2519
2520 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2521 struct sk_buff *skb;
2522
2523 if (chan->sent) {
2524 chan->sent = 0;
2525 continue;
2526 }
2527
2528 if (skb_queue_empty(&chan->data_q))
2529 continue;
2530
2531 skb = skb_peek(&chan->data_q);
2532 if (skb->priority >= HCI_PRIO_MAX - 1)
2533 continue;
2534
2535 skb->priority = HCI_PRIO_MAX - 1;
2536
2537 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2538 skb->priority);
2539 }
2540
2541 if (hci_conn_num(hdev, type) == num)
2542 break;
2543 }
2544
2545 rcu_read_unlock();
2546
2547 }
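
/* Starvation avoidance: after a TX round, any channel that has queued
 * data but was not served (chan->sent == 0) gets the skb at the head of
 * its queue promoted to HCI_PRIO_MAX - 1, so it outranks ordinary
 * traffic in the next hci_chan_sent() pass.  Channels that were served
 * simply have their per-round counter reset.
 */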
2548
2549 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2550 {
2551 /* Calculate count of blocks used by this packet */
2552 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2553 }
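
/* Worked example (hypothetical numbers): with hdev->block_len == 256 and
 * an skb of 1000 bytes (4-byte ACL header plus 996 bytes of payload),
 * the packet consumes DIV_ROUND_UP(996, 256) == 4 controller data
 * blocks.
 */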
2554
2555 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2556 {
2557 if (!test_bit(HCI_RAW, &hdev->flags)) {
2558 /* ACL tx timeout must be longer than maximum
2559 * link supervision timeout (40.9 seconds) */
2560 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2561 HCI_ACL_TX_TIMEOUT))
2562 hci_link_tx_to(hdev, ACL_LINK);
2563 }
2564 }
2565
2566 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2567 {
2568 unsigned int cnt = hdev->acl_cnt;
2569 struct hci_chan *chan;
2570 struct sk_buff *skb;
2571 int quote;
2572
2573 __check_timeout(hdev, cnt);
2574
2575 while (hdev->acl_cnt &&
2576 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2577 u32 priority = (skb_peek(&chan->data_q))->priority;
2578 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2579 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2580 skb->len, skb->priority);
2581
2582 /* Stop if priority has changed */
2583 if (skb->priority < priority)
2584 break;
2585
2586 skb = skb_dequeue(&chan->data_q);
2587
2588 hci_conn_enter_active_mode(chan->conn,
2589 bt_cb(skb)->force_active);
2590
2591 hci_send_frame(skb);
2592 hdev->acl_last_tx = jiffies;
2593
2594 hdev->acl_cnt--;
2595 chan->sent++;
2596 chan->conn->sent++;
2597 }
2598 }
2599
2600 if (cnt != hdev->acl_cnt)
2601 hci_prio_recalculate(hdev, ACL_LINK);
2602 }
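
/* Packet-based ACL scheduling: the outer loop keeps asking
 * hci_chan_sent() for the next channel and its quote while ACL credits
 * remain; the inner loop drains that channel until the quote is used up
 * or a lower-priority skb reaches the head of its queue.  Every frame
 * sent costs one acl_cnt credit, which is given back when the controller
 * reports Number of Completed Packets (handled in the event code outside
 * this file).
 */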
2603
2604 static void hci_sched_acl_blk(struct hci_dev *hdev)
2605 {
2606 unsigned int cnt = hdev->block_cnt;
2607 struct hci_chan *chan;
2608 struct sk_buff *skb;
2609 int quote;
2610 u8 type;
2611
2612 __check_timeout(hdev, cnt);
2613
2614 BT_DBG("%s", hdev->name);
2615
2616 if (hdev->dev_type == HCI_AMP)
2617 type = AMP_LINK;
2618 else
2619 type = ACL_LINK;
2620
2621 while (hdev->block_cnt > 0 &&
2622 (chan = hci_chan_sent(hdev, type, &quote))) {
2623 u32 priority = (skb_peek(&chan->data_q))->priority;
2624 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2625 int blocks;
2626
2627 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2628 skb->len, skb->priority);
2629
2630 /* Stop if priority has changed */
2631 if (skb->priority < priority)
2632 break;
2633
2634 skb = skb_dequeue(&chan->data_q);
2635
2636 blocks = __get_blocks(hdev, skb);
2637 if (blocks > hdev->block_cnt)
2638 return;
2639
2640 hci_conn_enter_active_mode(chan->conn,
2641 bt_cb(skb)->force_active);
2642
2643 hci_send_frame(skb);
2644 hdev->acl_last_tx = jiffies;
2645
2646 hdev->block_cnt -= blocks;
2647 quote -= blocks;
2648
2649 chan->sent += blocks;
2650 chan->conn->sent += blocks;
2651 }
2652 }
2653
2654 if (cnt != hdev->block_cnt)
2655 hci_prio_recalculate(hdev, type);
2656 }
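
/* Block-based ACL scheduling follows the same pattern, but credits are
 * counted in controller data blocks rather than packets: each frame
 * consumes __get_blocks() worth of block_cnt and of the channel's quote,
 * and the pass bails out early if the next frame would not fit into the
 * blocks that are left.
 */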
2657
2658 static void hci_sched_acl(struct hci_dev *hdev)
2659 {
2660 BT_DBG("%s", hdev->name);
2661
2662 /* No ACL links on a BR/EDR controller, nothing to schedule */
2663 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2664 return;
2665
2666 /* No AMP links on an AMP controller, nothing to schedule */
2667 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2668 return;
2669
2670 switch (hdev->flow_ctl_mode) {
2671 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2672 hci_sched_acl_pkt(hdev);
2673 break;
2674
2675 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2676 hci_sched_acl_blk(hdev);
2677 break;
2678 }
2679 }
2680
2681 /* Schedule SCO */
2682 static void hci_sched_sco(struct hci_dev *hdev)
2683 {
2684 struct hci_conn *conn;
2685 struct sk_buff *skb;
2686 int quote;
2687
2688 BT_DBG("%s", hdev->name);
2689
2690 if (!hci_conn_num(hdev, SCO_LINK))
2691 return;
2692
2693 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703 }
2704
2705 static void hci_sched_esco(struct hci_dev *hdev)
2706 {
2707 struct hci_conn *conn;
2708 struct sk_buff *skb;
2709 int quote;
2710
2711 BT_DBG("%s", hdev->name);
2712
2713 if (!hci_conn_num(hdev, ESCO_LINK))
2714 return;
2715
2716 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2717 &quote))) {
2718 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2719 BT_DBG("skb %p len %d", skb, skb->len);
2720 hci_send_frame(skb);
2721
2722 conn->sent++;
2723 if (conn->sent == ~0)
2724 conn->sent = 0;
2725 }
2726 }
2727 }
2728
2729 static void hci_sched_le(struct hci_dev *hdev)
2730 {
2731 struct hci_chan *chan;
2732 struct sk_buff *skb;
2733 int quote, cnt, tmp;
2734
2735 BT_DBG("%s", hdev->name);
2736
2737 if (!hci_conn_num(hdev, LE_LINK))
2738 return;
2739
2740 if (!test_bit(HCI_RAW, &hdev->flags)) {
2741 /* LE tx timeout must be longer than maximum
2742 * link supervision timeout (40.9 seconds) */
2743 if (!hdev->le_cnt && hdev->le_pkts &&
2744 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2745 hci_link_tx_to(hdev, LE_LINK);
2746 }
2747
2748 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2749 tmp = cnt;
2750 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2751 u32 priority = (skb_peek(&chan->data_q))->priority;
2752 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2753 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2754 skb->len, skb->priority);
2755
2756 /* Stop if priority has changed */
2757 if (skb->priority < priority)
2758 break;
2759
2760 skb = skb_dequeue(&chan->data_q);
2761
2762 hci_send_frame(skb);
2763 hdev->le_last_tx = jiffies;
2764
2765 cnt--;
2766 chan->sent++;
2767 chan->conn->sent++;
2768 }
2769 }
2770
2771 if (hdev->le_pkts)
2772 hdev->le_cnt = cnt;
2773 else
2774 hdev->acl_cnt = cnt;
2775
2776 if (cnt != tmp)
2777 hci_prio_recalculate(hdev, LE_LINK);
2778 }
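
/* LE scheduling mirrors the packet-based ACL case, with one twist: if
 * the controller advertises no dedicated LE buffers (le_pkts == 0), LE
 * traffic borrows from the ACL credit pool, which is why the remaining
 * count is written back to either le_cnt or acl_cnt at the end of the
 * pass.
 */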
2779
2780 static void hci_tx_work(struct work_struct *work)
2781 {
2782 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2783 struct sk_buff *skb;
2784
2785 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2786 hdev->sco_cnt, hdev->le_cnt);
2787
2788 /* Schedule each queue and hand pending frames to the HCI driver */
2789
2790 hci_sched_acl(hdev);
2791
2792 hci_sched_sco(hdev);
2793
2794 hci_sched_esco(hdev);
2795
2796 hci_sched_le(hdev);
2797
2798 /* Flush any queued raw (unknown type) packets */
2799 while ((skb = skb_dequeue(&hdev->raw_q)))
2800 hci_send_frame(skb);
2801 }
2802
2803 /* ----- HCI RX task (incoming data processing) ----- */
2804
2805 /* ACL data packet */
2806 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2807 {
2808 struct hci_acl_hdr *hdr = (void *) skb->data;
2809 struct hci_conn *conn;
2810 __u16 handle, flags;
2811
2812 skb_pull(skb, HCI_ACL_HDR_SIZE);
2813
2814 handle = __le16_to_cpu(hdr->handle);
2815 flags = hci_flags(handle);
2816 handle = hci_handle(handle);
2817
2818 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2819 handle, flags);
2820
2821 hdev->stat.acl_rx++;
2822
2823 hci_dev_lock(hdev);
2824 conn = hci_conn_hash_lookup_handle(hdev, handle);
2825 hci_dev_unlock(hdev);
2826
2827 if (conn) {
2828 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2829
2830 /* Send to upper protocol */
2831 l2cap_recv_acldata(conn, skb, flags);
2832 return;
2833 } else {
2834 BT_ERR("%s ACL packet for unknown connection handle %d",
2835 hdev->name, handle);
2836 }
2837
2838 kfree_skb(skb);
2839 }
2840
2841 /* SCO data packet */
2842 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2843 {
2844 struct hci_sco_hdr *hdr = (void *) skb->data;
2845 struct hci_conn *conn;
2846 __u16 handle;
2847
2848 skb_pull(skb, HCI_SCO_HDR_SIZE);
2849
2850 handle = __le16_to_cpu(hdr->handle);
2851
2852 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2853
2854 hdev->stat.sco_rx++;
2855
2856 hci_dev_lock(hdev);
2857 conn = hci_conn_hash_lookup_handle(hdev, handle);
2858 hci_dev_unlock(hdev);
2859
2860 if (conn) {
2861 /* Send to upper protocol */
2862 sco_recv_scodata(conn, skb);
2863 return;
2864 } else {
2865 BT_ERR("%s SCO packet for unknown connection handle %d",
2866 hdev->name, handle);
2867 }
2868
2869 kfree_skb(skb);
2870 }
2871
2872 static void hci_rx_work(struct work_struct *work)
2873 {
2874 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2875 struct sk_buff *skb;
2876
2877 BT_DBG("%s", hdev->name);
2878
2879 while ((skb = skb_dequeue(&hdev->rx_q))) {
2880 /* Send copy to monitor */
2881 hci_send_to_monitor(hdev, skb);
2882
2883 if (atomic_read(&hdev->promisc)) {
2884 /* Send copy to the sockets */
2885 hci_send_to_sock(hdev, skb);
2886 }
2887
2888 if (test_bit(HCI_RAW, &hdev->flags)) {
2889 kfree_skb(skb);
2890 continue;
2891 }
2892
2893 if (test_bit(HCI_INIT, &hdev->flags)) {
2894 /* Don't process data packets in this state. */
2895 switch (bt_cb(skb)->pkt_type) {
2896 case HCI_ACLDATA_PKT:
2897 case HCI_SCODATA_PKT:
2898 kfree_skb(skb);
2899 continue;
2900 }
2901 }
2902
2903 /* Process frame */
2904 switch (bt_cb(skb)->pkt_type) {
2905 case HCI_EVENT_PKT:
2906 BT_DBG("%s Event packet", hdev->name);
2907 hci_event_packet(hdev, skb);
2908 break;
2909
2910 case HCI_ACLDATA_PKT:
2911 BT_DBG("%s ACL data packet", hdev->name);
2912 hci_acldata_packet(hdev, skb);
2913 break;
2914
2915 case HCI_SCODATA_PKT:
2916 BT_DBG("%s SCO data packet", hdev->name);
2917 hci_scodata_packet(hdev, skb);
2918 break;
2919
2920 default:
2921 kfree_skb(skb);
2922 break;
2923 }
2924 }
2925 }
2926
2927 static void hci_cmd_work(struct work_struct *work)
2928 {
2929 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2930 struct sk_buff *skb;
2931
2932 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2933 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2934
2935 /* Send queued commands */
2936 if (atomic_read(&hdev->cmd_cnt)) {
2937 skb = skb_dequeue(&hdev->cmd_q);
2938 if (!skb)
2939 return;
2940
2941 kfree_skb(hdev->sent_cmd);
2942
2943 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2944 if (hdev->sent_cmd) {
2945 atomic_dec(&hdev->cmd_cnt);
2946 hci_send_frame(skb);
2947 if (test_bit(HCI_RESET, &hdev->flags))
2948 del_timer(&hdev->cmd_timer);
2949 else
2950 mod_timer(&hdev->cmd_timer,
2951 jiffies + HCI_CMD_TIMEOUT);
2952 } else {
2953 skb_queue_head(&hdev->cmd_q, skb);
2954 queue_work(hdev->workqueue, &hdev->cmd_work);
2955 }
2956 }
2957 }
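
/* Command flow control: cmd_cnt mirrors the number of command slots the
 * controller currently allows (typically one), so a command is dequeued
 * only while a slot is free and each send costs one credit.  A clone of
 * the outgoing command is parked in hdev->sent_cmd for
 * hci_sent_cmd_data(), and cmd_timer is re-armed with HCI_CMD_TIMEOUT
 * unless an HCI reset is in flight, in which case the pending timer is
 * cancelled instead.
 */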
2958
2959 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2960 {
2961 /* General inquiry access code (GIAC) */
2962 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2963 struct hci_cp_inquiry cp;
2964
2965 BT_DBG("%s", hdev->name);
2966
2967 if (test_bit(HCI_INQUIRY, &hdev->flags))
2968 return -EINPROGRESS;
2969
2970 inquiry_cache_flush(hdev);
2971
2972 memset(&cp, 0, sizeof(cp));
2973 memcpy(&cp.lap, lap, sizeof(cp.lap));
2974 cp.length = length;
2975
2976 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2977 }
2978
2979 int hci_cancel_inquiry(struct hci_dev *hdev)
2980 {
2981 BT_DBG("%s", hdev->name);
2982
2983 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2984 return -EALREADY;
2985
2986 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2987 }
2988
2989 u8 bdaddr_to_le(u8 bdaddr_type)
2990 {
2991 switch (bdaddr_type) {
2992 case BDADDR_LE_PUBLIC:
2993 return ADDR_LE_DEV_PUBLIC;
2994
2995 default:
2996 /* Fallback to LE Random address type */
2997 return ADDR_LE_DEV_RANDOM;
2998 }
2999 }