Bluetooth: AMP: Set no FCS for incoming L2CAP chan
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63
64 	/* If this is the init phase, check whether the completed command
65 	 * matches the last init command; if not, handle the quirk below.
66 	 */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96 }
97
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107 }
108
109 /* Execute request and wait for completion. */
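/* The req callback queues one or more HCI commands; hci_req_complete()
 * marks the request done and wakes req_wait_q.  A pending signal yields
 * -EINTR, an expired timeout -ETIMEDOUT, and a completed request has its
 * controller status mapped through bt_to_errno().  Must be called with
 * the request lock held (see hci_request() below).
 */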
110 static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
113 {
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
134 err = -bt_to_errno(hdev->req_result);
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
144 }
145
146 hdev->req_status = hdev->req_result = 0;
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151 }
152
153 static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
156 {
157 int ret;
158
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168 }
169
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171 {
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 }
178
179 static void bredr_init(struct hci_dev *hdev)
180 {
181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182
183 /* Read Local Supported Features */
184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
185
186 /* Read Local Version */
187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
188 }
189
190 static void amp_init(struct hci_dev *hdev)
191 {
192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193
194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
199
200 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
202 }
203
204 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
205 {
206 struct sk_buff *skb;
207
208 BT_DBG("%s %ld", hdev->name, opt);
209
210 /* Driver initialization */
211
212 /* Special commands */
213 while ((skb = skb_dequeue(&hdev->driver_init))) {
214 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 skb->dev = (void *) hdev;
216
217 skb_queue_tail(&hdev->cmd_q, skb);
218 queue_work(hdev->workqueue, &hdev->cmd_work);
219 }
220 skb_queue_purge(&hdev->driver_init);
221
222 /* Reset */
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 hci_reset_req(hdev, 0);
225
226 switch (hdev->dev_type) {
227 case HCI_BREDR:
228 bredr_init(hdev);
229 break;
230
231 case HCI_AMP:
232 amp_init(hdev);
233 break;
234
235 default:
236 BT_ERR("Unknown device type %d", hdev->dev_type);
237 break;
238 }
239 }
240
241 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242 {
243 __u8 scan = opt;
244
245 BT_DBG("%s %x", hdev->name, scan);
246
247 /* Inquiry and Page scans */
248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
249 }
250
251 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252 {
253 __u8 auth = opt;
254
255 BT_DBG("%s %x", hdev->name, auth);
256
257 /* Authentication */
258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
259 }
260
261 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262 {
263 __u8 encrypt = opt;
264
265 BT_DBG("%s %x", hdev->name, encrypt);
266
267 /* Encryption */
268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
269 }
270
271 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272 {
273 __le16 policy = cpu_to_le16(opt);
274
275 BT_DBG("%s %x", hdev->name, policy);
276
277 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279 }
280
281 /* Get HCI device by index.
282 * Device is held on return. */
283 struct hci_dev *hci_dev_get(int index)
284 {
285 struct hci_dev *hdev = NULL, *d;
286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
293 list_for_each_entry(d, &hci_dev_list, list) {
294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301 }
302
303 /* ---- Inquiry support ---- */
304
305 bool hci_discovery_active(struct hci_dev *hdev)
306 {
307 struct discovery_state *discov = &hdev->discovery;
308
309 switch (discov->state) {
310 case DISCOVERY_FINDING:
311 case DISCOVERY_RESOLVING:
312 return true;
313
314 default:
315 return false;
316 }
317 }
318
319 void hci_discovery_set_state(struct hci_dev *hdev, int state)
320 {
321 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322
323 if (hdev->discovery.state == state)
324 return;
325
326 switch (state) {
327 case DISCOVERY_STOPPED:
328 if (hdev->discovery.state != DISCOVERY_STARTING)
329 mgmt_discovering(hdev, 0);
330 break;
331 case DISCOVERY_STARTING:
332 break;
333 case DISCOVERY_FINDING:
334 mgmt_discovering(hdev, 1);
335 break;
336 case DISCOVERY_RESOLVING:
337 break;
338 case DISCOVERY_STOPPING:
339 break;
340 }
341
342 hdev->discovery.state = state;
343 }
344
345 static void inquiry_cache_flush(struct hci_dev *hdev)
346 {
347 struct discovery_state *cache = &hdev->discovery;
348 struct inquiry_entry *p, *n;
349
350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
352 kfree(p);
353 }
354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
357 }
358
359 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
361 {
362 struct discovery_state *cache = &hdev->discovery;
363 struct inquiry_entry *e;
364
365 BT_DBG("cache %p, %pMR", cache, bdaddr);
366
367 list_for_each_entry(e, &cache->all, all) {
368 if (!bacmp(&e->data.bdaddr, bdaddr))
369 return e;
370 }
371
372 return NULL;
373 }
374
375 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
376 bdaddr_t *bdaddr)
377 {
378 struct discovery_state *cache = &hdev->discovery;
379 struct inquiry_entry *e;
380
381 BT_DBG("cache %p, %pMR", cache, bdaddr);
382
383 list_for_each_entry(e, &cache->unknown, list) {
384 if (!bacmp(&e->data.bdaddr, bdaddr))
385 return e;
386 }
387
388 return NULL;
389 }
390
391 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
392 bdaddr_t *bdaddr,
393 int state)
394 {
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408 }
409
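/* Re-insert @ie into the resolve list so that entries stay ordered by
 * signal strength (ascending absolute RSSI); entries whose name
 * resolution is already in progress are skipped so they stay ahead
 * in the queue.
 */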
410 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
411 struct inquiry_entry *ie)
412 {
413 struct discovery_state *cache = &hdev->discovery;
414 struct list_head *pos = &cache->resolve;
415 struct inquiry_entry *p;
416
417 list_del(&ie->list);
418
419 list_for_each_entry(p, &cache->resolve, list) {
420 if (p->name_state != NAME_PENDING &&
421 abs(p->data.rssi) >= abs(ie->data.rssi))
422 break;
423 pos = &p->list;
424 }
425
426 list_add(&ie->list, pos);
427 }
428
429 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
430 bool name_known, bool *ssp)
431 {
432 struct discovery_state *cache = &hdev->discovery;
433 struct inquiry_entry *ie;
434
435 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
436
437 if (ssp)
438 *ssp = data->ssp_mode;
439
440 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
441 if (ie) {
442 if (ie->data.ssp_mode && ssp)
443 *ssp = true;
444
445 if (ie->name_state == NAME_NEEDED &&
446 data->rssi != ie->data.rssi) {
447 ie->data.rssi = data->rssi;
448 hci_inquiry_cache_update_resolve(hdev, ie);
449 }
450
451 goto update;
452 }
453
454 /* Entry not in the cache. Add new one. */
455 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
456 if (!ie)
457 return false;
458
459 list_add(&ie->all, &cache->all);
460
461 if (name_known) {
462 ie->name_state = NAME_KNOWN;
463 } else {
464 ie->name_state = NAME_NOT_KNOWN;
465 list_add(&ie->list, &cache->unknown);
466 }
467
468 update:
469 if (name_known && ie->name_state != NAME_KNOWN &&
470 ie->name_state != NAME_PENDING) {
471 ie->name_state = NAME_KNOWN;
472 list_del(&ie->list);
473 }
474
475 memcpy(&ie->data, data, sizeof(*data));
476 ie->timestamp = jiffies;
477 cache->timestamp = jiffies;
478
479 if (ie->name_state == NAME_NOT_KNOWN)
480 return false;
481
482 return true;
483 }
484
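/* Copy at most @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info records; returns the number of entries copied.
 * Called with hdev->lock held.
 */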
485 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
486 {
487 struct discovery_state *cache = &hdev->discovery;
488 struct inquiry_info *info = (struct inquiry_info *) buf;
489 struct inquiry_entry *e;
490 int copied = 0;
491
492 list_for_each_entry(e, &cache->all, all) {
493 struct inquiry_data *data = &e->data;
494
495 if (copied >= num)
496 break;
497
498 bacpy(&info->bdaddr, &data->bdaddr);
499 info->pscan_rep_mode = data->pscan_rep_mode;
500 info->pscan_period_mode = data->pscan_period_mode;
501 info->pscan_mode = data->pscan_mode;
502 memcpy(info->dev_class, data->dev_class, 3);
503 info->clock_offset = data->clock_offset;
504
505 info++;
506 copied++;
507 }
508
509 BT_DBG("cache %p, copied %d", cache, copied);
510 return copied;
511 }
512
513 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
514 {
515 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
516 struct hci_cp_inquiry cp;
517
518 BT_DBG("%s", hdev->name);
519
520 if (test_bit(HCI_INQUIRY, &hdev->flags))
521 return;
522
523 /* Start Inquiry */
524 memcpy(&cp.lap, &ir->lap, 3);
525 cp.length = ir->length;
526 cp.num_rsp = ir->num_rsp;
527 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
528 }
529
530 int hci_inquiry(void __user *arg)
531 {
532 __u8 __user *ptr = arg;
533 struct hci_inquiry_req ir;
534 struct hci_dev *hdev;
535 int err = 0, do_inquiry = 0, max_rsp;
536 long timeo;
537 __u8 *buf;
538
539 if (copy_from_user(&ir, ptr, sizeof(ir)))
540 return -EFAULT;
541
542 hdev = hci_dev_get(ir.dev_id);
543 if (!hdev)
544 return -ENODEV;
545
546 hci_dev_lock(hdev);
547 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
548 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
549 inquiry_cache_flush(hdev);
550 do_inquiry = 1;
551 }
552 hci_dev_unlock(hdev);
553
554 timeo = ir.length * msecs_to_jiffies(2000);
555
556 if (do_inquiry) {
557 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
558 if (err < 0)
559 goto done;
560 }
561
562 	/* For an unlimited number of responses, use a buffer with
563 	 * 255 entries.
564 	 */
565 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
566
567 	/* cache_dump can't sleep, so dump into a temporary buffer and then
568 	 * copy it to user space.
569 	 */
570 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
571 if (!buf) {
572 err = -ENOMEM;
573 goto done;
574 }
575
576 hci_dev_lock(hdev);
577 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
578 hci_dev_unlock(hdev);
579
580 BT_DBG("num_rsp %d", ir.num_rsp);
581
582 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
583 ptr += sizeof(ir);
584 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
585 ir.num_rsp))
586 err = -EFAULT;
587 } else
588 err = -EFAULT;
589
590 kfree(buf);
591
592 done:
593 hci_dev_put(hdev);
594 return err;
595 }
596
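/* Build the LE advertising data into @ptr: an EIR_FLAGS field with the
 * discoverable/BR-EDR capability bits, the advertising TX power when
 * known, and the local name (shortened if it does not fit).  Returns the
 * number of bytes written, at most HCI_MAX_AD_LENGTH.
 */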
597 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
598 {
599 u8 ad_len = 0, flags = 0;
600 size_t name_len;
601
602 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
603 flags |= LE_AD_GENERAL;
604
605 if (!lmp_bredr_capable(hdev))
606 flags |= LE_AD_NO_BREDR;
607
608 if (lmp_le_br_capable(hdev))
609 flags |= LE_AD_SIM_LE_BREDR_CTRL;
610
611 if (lmp_host_le_br_capable(hdev))
612 flags |= LE_AD_SIM_LE_BREDR_HOST;
613
614 if (flags) {
615 BT_DBG("adv flags 0x%02x", flags);
616
617 ptr[0] = 2;
618 ptr[1] = EIR_FLAGS;
619 ptr[2] = flags;
620
621 ad_len += 3;
622 ptr += 3;
623 }
624
625 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
626 ptr[0] = 2;
627 ptr[1] = EIR_TX_POWER;
628 ptr[2] = (u8) hdev->adv_tx_power;
629
630 ad_len += 3;
631 ptr += 3;
632 }
633
634 name_len = strlen(hdev->dev_name);
635 if (name_len > 0) {
636 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
637
638 if (name_len > max_len) {
639 name_len = max_len;
640 ptr[1] = EIR_NAME_SHORT;
641 } else
642 ptr[1] = EIR_NAME_COMPLETE;
643
644 ptr[0] = name_len + 1;
645
646 memcpy(ptr + 2, hdev->dev_name, name_len);
647
648 ad_len += (name_len + 2);
649 ptr += (name_len + 2);
650 }
651
652 return ad_len;
653 }
654
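/* Regenerate the advertising data and push it to the controller with
 * HCI_OP_LE_SET_ADV_DATA, but only if it differs from what was sent last
 * time.  Returns 0 if nothing changed, -EINVAL if the controller is not
 * LE capable, or the result of hci_send_cmd().
 */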
655 int hci_update_ad(struct hci_dev *hdev)
656 {
657 struct hci_cp_le_set_adv_data cp;
658 u8 len;
659 int err;
660
661 hci_dev_lock(hdev);
662
663 if (!lmp_le_capable(hdev)) {
664 err = -EINVAL;
665 goto unlock;
666 }
667
668 memset(&cp, 0, sizeof(cp));
669
670 len = create_ad(hdev, cp.data);
671
672 if (hdev->adv_data_len == len &&
673 memcmp(cp.data, hdev->adv_data, len) == 0) {
674 err = 0;
675 goto unlock;
676 }
677
678 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
679 hdev->adv_data_len = len;
680
681 cp.length = len;
682 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
683
684 unlock:
685 hci_dev_unlock(hdev);
686
687 return err;
688 }
689
690 /* ---- HCI ioctl helpers ---- */
691
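/* Power on an HCI device: call the driver's open(), run the HCI init
 * request unless the device is marked raw, and on success mark it up and
 * notify the management interface.  Any failure during init tears the
 * device back down via the driver's close().
 */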
692 int hci_dev_open(__u16 dev)
693 {
694 struct hci_dev *hdev;
695 int ret = 0;
696
697 hdev = hci_dev_get(dev);
698 if (!hdev)
699 return -ENODEV;
700
701 BT_DBG("%s %p", hdev->name, hdev);
702
703 hci_req_lock(hdev);
704
705 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
706 ret = -ENODEV;
707 goto done;
708 }
709
710 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
711 ret = -ERFKILL;
712 goto done;
713 }
714
715 if (test_bit(HCI_UP, &hdev->flags)) {
716 ret = -EALREADY;
717 goto done;
718 }
719
720 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
721 set_bit(HCI_RAW, &hdev->flags);
722
723 	/* Treat all non-BR/EDR controllers as raw devices if
724 	 * enable_hs is not set */
725 if (hdev->dev_type != HCI_BREDR && !enable_hs)
726 set_bit(HCI_RAW, &hdev->flags);
727
728 if (hdev->open(hdev)) {
729 ret = -EIO;
730 goto done;
731 }
732
733 if (!test_bit(HCI_RAW, &hdev->flags)) {
734 atomic_set(&hdev->cmd_cnt, 1);
735 set_bit(HCI_INIT, &hdev->flags);
736 hdev->init_last_cmd = 0;
737
738 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
739
740 clear_bit(HCI_INIT, &hdev->flags);
741 }
742
743 if (!ret) {
744 hci_dev_hold(hdev);
745 set_bit(HCI_UP, &hdev->flags);
746 hci_notify(hdev, HCI_DEV_UP);
747 hci_update_ad(hdev);
748 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
749 mgmt_valid_hdev(hdev)) {
750 hci_dev_lock(hdev);
751 mgmt_powered(hdev, 1);
752 hci_dev_unlock(hdev);
753 }
754 } else {
755 /* Init failed, cleanup */
756 flush_work(&hdev->tx_work);
757 flush_work(&hdev->cmd_work);
758 flush_work(&hdev->rx_work);
759
760 skb_queue_purge(&hdev->cmd_q);
761 skb_queue_purge(&hdev->rx_q);
762
763 if (hdev->flush)
764 hdev->flush(hdev);
765
766 if (hdev->sent_cmd) {
767 kfree_skb(hdev->sent_cmd);
768 hdev->sent_cmd = NULL;
769 }
770
771 hdev->close(hdev);
772 hdev->flags = 0;
773 }
774
775 done:
776 hci_req_unlock(hdev);
777 hci_dev_put(hdev);
778 return ret;
779 }
780
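/* Bring an HCI device down: cancel pending work, flush the RX/TX/cmd
 * queues, optionally send an HCI reset (HCI_QUIRK_RESET_ON_CLOSE), call
 * the driver's close() and, where appropriate, let the management
 * interface know the device is powered off.
 */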
781 static int hci_dev_do_close(struct hci_dev *hdev)
782 {
783 BT_DBG("%s %p", hdev->name, hdev);
784
785 cancel_work_sync(&hdev->le_scan);
786
787 cancel_delayed_work(&hdev->power_off);
788
789 hci_req_cancel(hdev, ENODEV);
790 hci_req_lock(hdev);
791
792 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
793 del_timer_sync(&hdev->cmd_timer);
794 hci_req_unlock(hdev);
795 return 0;
796 }
797
798 /* Flush RX and TX works */
799 flush_work(&hdev->tx_work);
800 flush_work(&hdev->rx_work);
801
802 if (hdev->discov_timeout > 0) {
803 cancel_delayed_work(&hdev->discov_off);
804 hdev->discov_timeout = 0;
805 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
806 }
807
808 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
809 cancel_delayed_work(&hdev->service_cache);
810
811 cancel_delayed_work_sync(&hdev->le_scan_disable);
812
813 hci_dev_lock(hdev);
814 inquiry_cache_flush(hdev);
815 hci_conn_hash_flush(hdev);
816 hci_dev_unlock(hdev);
817
818 hci_notify(hdev, HCI_DEV_DOWN);
819
820 if (hdev->flush)
821 hdev->flush(hdev);
822
823 /* Reset device */
824 skb_queue_purge(&hdev->cmd_q);
825 atomic_set(&hdev->cmd_cnt, 1);
826 if (!test_bit(HCI_RAW, &hdev->flags) &&
827 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
828 set_bit(HCI_INIT, &hdev->flags);
829 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
830 clear_bit(HCI_INIT, &hdev->flags);
831 }
832
833 /* flush cmd work */
834 flush_work(&hdev->cmd_work);
835
836 /* Drop queues */
837 skb_queue_purge(&hdev->rx_q);
838 skb_queue_purge(&hdev->cmd_q);
839 skb_queue_purge(&hdev->raw_q);
840
841 /* Drop last sent command */
842 if (hdev->sent_cmd) {
843 del_timer_sync(&hdev->cmd_timer);
844 kfree_skb(hdev->sent_cmd);
845 hdev->sent_cmd = NULL;
846 }
847
848 /* After this point our queues are empty
849 * and no tasks are scheduled. */
850 hdev->close(hdev);
851
852 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
853 mgmt_valid_hdev(hdev)) {
854 hci_dev_lock(hdev);
855 mgmt_powered(hdev, 0);
856 hci_dev_unlock(hdev);
857 }
858
859 /* Clear flags */
860 hdev->flags = 0;
861
862 memset(hdev->eir, 0, sizeof(hdev->eir));
863 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
864
865 hci_req_unlock(hdev);
866
867 hci_dev_put(hdev);
868 return 0;
869 }
870
871 int hci_dev_close(__u16 dev)
872 {
873 struct hci_dev *hdev;
874 int err;
875
876 hdev = hci_dev_get(dev);
877 if (!hdev)
878 return -ENODEV;
879
880 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
881 cancel_delayed_work(&hdev->power_off);
882
883 err = hci_dev_do_close(hdev);
884
885 hci_dev_put(hdev);
886 return err;
887 }
888
889 int hci_dev_reset(__u16 dev)
890 {
891 struct hci_dev *hdev;
892 int ret = 0;
893
894 hdev = hci_dev_get(dev);
895 if (!hdev)
896 return -ENODEV;
897
898 hci_req_lock(hdev);
899
900 if (!test_bit(HCI_UP, &hdev->flags))
901 goto done;
902
903 /* Drop queues */
904 skb_queue_purge(&hdev->rx_q);
905 skb_queue_purge(&hdev->cmd_q);
906
907 hci_dev_lock(hdev);
908 inquiry_cache_flush(hdev);
909 hci_conn_hash_flush(hdev);
910 hci_dev_unlock(hdev);
911
912 if (hdev->flush)
913 hdev->flush(hdev);
914
915 atomic_set(&hdev->cmd_cnt, 1);
916 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
917
918 if (!test_bit(HCI_RAW, &hdev->flags))
919 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
920
921 done:
922 hci_req_unlock(hdev);
923 hci_dev_put(hdev);
924 return ret;
925 }
926
927 int hci_dev_reset_stat(__u16 dev)
928 {
929 struct hci_dev *hdev;
930 int ret = 0;
931
932 hdev = hci_dev_get(dev);
933 if (!hdev)
934 return -ENODEV;
935
936 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
937
938 hci_dev_put(hdev);
939
940 return ret;
941 }
942
943 int hci_dev_cmd(unsigned int cmd, void __user *arg)
944 {
945 struct hci_dev *hdev;
946 struct hci_dev_req dr;
947 int err = 0;
948
949 if (copy_from_user(&dr, arg, sizeof(dr)))
950 return -EFAULT;
951
952 hdev = hci_dev_get(dr.dev_id);
953 if (!hdev)
954 return -ENODEV;
955
956 switch (cmd) {
957 case HCISETAUTH:
958 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
959 HCI_INIT_TIMEOUT);
960 break;
961
962 case HCISETENCRYPT:
963 if (!lmp_encrypt_capable(hdev)) {
964 err = -EOPNOTSUPP;
965 break;
966 }
967
968 if (!test_bit(HCI_AUTH, &hdev->flags)) {
969 /* Auth must be enabled first */
970 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
971 HCI_INIT_TIMEOUT);
972 if (err)
973 break;
974 }
975
976 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
977 HCI_INIT_TIMEOUT);
978 break;
979
980 case HCISETSCAN:
981 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
982 HCI_INIT_TIMEOUT);
983 break;
984
985 case HCISETLINKPOL:
986 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
987 HCI_INIT_TIMEOUT);
988 break;
989
990 case HCISETLINKMODE:
991 hdev->link_mode = ((__u16) dr.dev_opt) &
992 (HCI_LM_MASTER | HCI_LM_ACCEPT);
993 break;
994
995 case HCISETPTYPE:
996 hdev->pkt_type = (__u16) dr.dev_opt;
997 break;
998
999 case HCISETACLMTU:
1000 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1001 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1002 break;
1003
1004 case HCISETSCOMTU:
1005 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1006 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1007 break;
1008
1009 default:
1010 err = -EINVAL;
1011 break;
1012 }
1013
1014 hci_dev_put(hdev);
1015 return err;
1016 }
1017
1018 int hci_get_dev_list(void __user *arg)
1019 {
1020 struct hci_dev *hdev;
1021 struct hci_dev_list_req *dl;
1022 struct hci_dev_req *dr;
1023 int n = 0, size, err;
1024 __u16 dev_num;
1025
1026 if (get_user(dev_num, (__u16 __user *) arg))
1027 return -EFAULT;
1028
1029 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1030 return -EINVAL;
1031
1032 size = sizeof(*dl) + dev_num * sizeof(*dr);
1033
1034 dl = kzalloc(size, GFP_KERNEL);
1035 if (!dl)
1036 return -ENOMEM;
1037
1038 dr = dl->dev_req;
1039
1040 read_lock(&hci_dev_list_lock);
1041 list_for_each_entry(hdev, &hci_dev_list, list) {
1042 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1043 cancel_delayed_work(&hdev->power_off);
1044
1045 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1046 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1047
1048 (dr + n)->dev_id = hdev->id;
1049 (dr + n)->dev_opt = hdev->flags;
1050
1051 if (++n >= dev_num)
1052 break;
1053 }
1054 read_unlock(&hci_dev_list_lock);
1055
1056 dl->dev_num = n;
1057 size = sizeof(*dl) + n * sizeof(*dr);
1058
1059 err = copy_to_user(arg, dl, size);
1060 kfree(dl);
1061
1062 return err ? -EFAULT : 0;
1063 }
1064
1065 int hci_get_dev_info(void __user *arg)
1066 {
1067 struct hci_dev *hdev;
1068 struct hci_dev_info di;
1069 int err = 0;
1070
1071 if (copy_from_user(&di, arg, sizeof(di)))
1072 return -EFAULT;
1073
1074 hdev = hci_dev_get(di.dev_id);
1075 if (!hdev)
1076 return -ENODEV;
1077
1078 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1079 cancel_delayed_work_sync(&hdev->power_off);
1080
1081 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1082 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1083
1084 strcpy(di.name, hdev->name);
1085 di.bdaddr = hdev->bdaddr;
1086 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1087 di.flags = hdev->flags;
1088 di.pkt_type = hdev->pkt_type;
1089 if (lmp_bredr_capable(hdev)) {
1090 di.acl_mtu = hdev->acl_mtu;
1091 di.acl_pkts = hdev->acl_pkts;
1092 di.sco_mtu = hdev->sco_mtu;
1093 di.sco_pkts = hdev->sco_pkts;
1094 } else {
1095 di.acl_mtu = hdev->le_mtu;
1096 di.acl_pkts = hdev->le_pkts;
1097 di.sco_mtu = 0;
1098 di.sco_pkts = 0;
1099 }
1100 di.link_policy = hdev->link_policy;
1101 di.link_mode = hdev->link_mode;
1102
1103 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1104 memcpy(&di.features, &hdev->features, sizeof(di.features));
1105
1106 if (copy_to_user(arg, &di, sizeof(di)))
1107 err = -EFAULT;
1108
1109 hci_dev_put(hdev);
1110
1111 return err;
1112 }
1113
1114 /* ---- Interface to HCI drivers ---- */
1115
1116 static int hci_rfkill_set_block(void *data, bool blocked)
1117 {
1118 struct hci_dev *hdev = data;
1119
1120 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1121
1122 if (!blocked)
1123 return 0;
1124
1125 hci_dev_do_close(hdev);
1126
1127 return 0;
1128 }
1129
1130 static const struct rfkill_ops hci_rfkill_ops = {
1131 .set_block = hci_rfkill_set_block,
1132 };
1133
1134 static void hci_power_on(struct work_struct *work)
1135 {
1136 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1137
1138 BT_DBG("%s", hdev->name);
1139
1140 if (hci_dev_open(hdev->id) < 0)
1141 return;
1142
1143 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1144 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1145
1146 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1147 mgmt_index_added(hdev);
1148 }
1149
1150 static void hci_power_off(struct work_struct *work)
1151 {
1152 struct hci_dev *hdev = container_of(work, struct hci_dev,
1153 power_off.work);
1154
1155 BT_DBG("%s", hdev->name);
1156
1157 hci_dev_do_close(hdev);
1158 }
1159
1160 static void hci_discov_off(struct work_struct *work)
1161 {
1162 struct hci_dev *hdev;
1163 u8 scan = SCAN_PAGE;
1164
1165 hdev = container_of(work, struct hci_dev, discov_off.work);
1166
1167 BT_DBG("%s", hdev->name);
1168
1169 hci_dev_lock(hdev);
1170
1171 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1172
1173 hdev->discov_timeout = 0;
1174
1175 hci_dev_unlock(hdev);
1176 }
1177
1178 int hci_uuids_clear(struct hci_dev *hdev)
1179 {
1180 struct list_head *p, *n;
1181
1182 list_for_each_safe(p, n, &hdev->uuids) {
1183 struct bt_uuid *uuid;
1184
1185 uuid = list_entry(p, struct bt_uuid, list);
1186
1187 list_del(p);
1188 kfree(uuid);
1189 }
1190
1191 return 0;
1192 }
1193
1194 int hci_link_keys_clear(struct hci_dev *hdev)
1195 {
1196 struct list_head *p, *n;
1197
1198 list_for_each_safe(p, n, &hdev->link_keys) {
1199 struct link_key *key;
1200
1201 key = list_entry(p, struct link_key, list);
1202
1203 list_del(p);
1204 kfree(key);
1205 }
1206
1207 return 0;
1208 }
1209
1210 int hci_smp_ltks_clear(struct hci_dev *hdev)
1211 {
1212 struct smp_ltk *k, *tmp;
1213
1214 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1215 list_del(&k->list);
1216 kfree(k);
1217 }
1218
1219 return 0;
1220 }
1221
1222 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1223 {
1224 struct link_key *k;
1225
1226 list_for_each_entry(k, &hdev->link_keys, list)
1227 if (bacmp(bdaddr, &k->bdaddr) == 0)
1228 return k;
1229
1230 return NULL;
1231 }
1232
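/* Decide whether a new link key should be stored persistently.  Legacy
 * keys are always kept, debug keys and spurious changed-combination keys
 * never are, and for SSP keys the decision follows the bonding
 * requirements negotiated on @conn.
 */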
1233 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1234 u8 key_type, u8 old_key_type)
1235 {
1236 /* Legacy key */
1237 if (key_type < 0x03)
1238 return true;
1239
1240 /* Debug keys are insecure so don't store them persistently */
1241 if (key_type == HCI_LK_DEBUG_COMBINATION)
1242 return false;
1243
1244 /* Changed combination key and there's no previous one */
1245 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1246 return false;
1247
1248 /* Security mode 3 case */
1249 if (!conn)
1250 return true;
1251
1252 /* Neither local nor remote side had no-bonding as requirement */
1253 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1254 return true;
1255
1256 /* Local side had dedicated bonding as requirement */
1257 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1258 return true;
1259
1260 /* Remote side had dedicated bonding as requirement */
1261 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1262 return true;
1263
1264 /* If none of the above criteria match, then don't store the key
1265 * persistently */
1266 return false;
1267 }
1268
1269 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1270 {
1271 struct smp_ltk *k;
1272
1273 list_for_each_entry(k, &hdev->long_term_keys, list) {
1274 if (k->ediv != ediv ||
1275 memcmp(rand, k->rand, sizeof(k->rand)))
1276 continue;
1277
1278 return k;
1279 }
1280
1281 return NULL;
1282 }
1283
1284 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1285 u8 addr_type)
1286 {
1287 struct smp_ltk *k;
1288
1289 list_for_each_entry(k, &hdev->long_term_keys, list)
1290 if (addr_type == k->bdaddr_type &&
1291 bacmp(bdaddr, &k->bdaddr) == 0)
1292 return k;
1293
1294 return NULL;
1295 }
1296
1297 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1298 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1299 {
1300 struct link_key *key, *old_key;
1301 u8 old_key_type;
1302 bool persistent;
1303
1304 old_key = hci_find_link_key(hdev, bdaddr);
1305 if (old_key) {
1306 old_key_type = old_key->type;
1307 key = old_key;
1308 } else {
1309 old_key_type = conn ? conn->key_type : 0xff;
1310 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1311 if (!key)
1312 return -ENOMEM;
1313 list_add(&key->list, &hdev->link_keys);
1314 }
1315
1316 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1317
1318 /* Some buggy controller combinations generate a changed
1319 * combination key for legacy pairing even when there's no
1320 * previous key */
1321 if (type == HCI_LK_CHANGED_COMBINATION &&
1322 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1323 type = HCI_LK_COMBINATION;
1324 if (conn)
1325 conn->key_type = type;
1326 }
1327
1328 bacpy(&key->bdaddr, bdaddr);
1329 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1330 key->pin_len = pin_len;
1331
1332 if (type == HCI_LK_CHANGED_COMBINATION)
1333 key->type = old_key_type;
1334 else
1335 key->type = type;
1336
1337 if (!new_key)
1338 return 0;
1339
1340 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1341
1342 mgmt_new_link_key(hdev, key, persistent);
1343
1344 if (conn)
1345 conn->flush_key = !persistent;
1346
1347 return 0;
1348 }
1349
1350 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1351 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1352 ediv, u8 rand[8])
1353 {
1354 struct smp_ltk *key, *old_key;
1355
1356 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1357 return 0;
1358
1359 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1360 if (old_key)
1361 key = old_key;
1362 else {
1363 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1364 if (!key)
1365 return -ENOMEM;
1366 list_add(&key->list, &hdev->long_term_keys);
1367 }
1368
1369 bacpy(&key->bdaddr, bdaddr);
1370 key->bdaddr_type = addr_type;
1371 memcpy(key->val, tk, sizeof(key->val));
1372 key->authenticated = authenticated;
1373 key->ediv = ediv;
1374 key->enc_size = enc_size;
1375 key->type = type;
1376 memcpy(key->rand, rand, sizeof(key->rand));
1377
1378 if (!new_key)
1379 return 0;
1380
1381 if (type & HCI_SMP_LTK)
1382 mgmt_new_ltk(hdev, key, 1);
1383
1384 return 0;
1385 }
1386
1387 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1388 {
1389 struct link_key *key;
1390
1391 key = hci_find_link_key(hdev, bdaddr);
1392 if (!key)
1393 return -ENOENT;
1394
1395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1396
1397 list_del(&key->list);
1398 kfree(key);
1399
1400 return 0;
1401 }
1402
1403 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1404 {
1405 struct smp_ltk *k, *tmp;
1406
1407 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1408 if (bacmp(bdaddr, &k->bdaddr))
1409 continue;
1410
1411 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1412
1413 list_del(&k->list);
1414 kfree(k);
1415 }
1416
1417 return 0;
1418 }
1419
1420 /* HCI command timer function */
1421 static void hci_cmd_timeout(unsigned long arg)
1422 {
1423 struct hci_dev *hdev = (void *) arg;
1424
1425 if (hdev->sent_cmd) {
1426 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1427 u16 opcode = __le16_to_cpu(sent->opcode);
1428
1429 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1430 } else {
1431 BT_ERR("%s command tx timeout", hdev->name);
1432 }
1433
1434 atomic_set(&hdev->cmd_cnt, 1);
1435 queue_work(hdev->workqueue, &hdev->cmd_work);
1436 }
1437
1438 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1439 bdaddr_t *bdaddr)
1440 {
1441 struct oob_data *data;
1442
1443 list_for_each_entry(data, &hdev->remote_oob_data, list)
1444 if (bacmp(bdaddr, &data->bdaddr) == 0)
1445 return data;
1446
1447 return NULL;
1448 }
1449
1450 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1451 {
1452 struct oob_data *data;
1453
1454 data = hci_find_remote_oob_data(hdev, bdaddr);
1455 if (!data)
1456 return -ENOENT;
1457
1458 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1459
1460 list_del(&data->list);
1461 kfree(data);
1462
1463 return 0;
1464 }
1465
1466 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1467 {
1468 struct oob_data *data, *n;
1469
1470 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1471 list_del(&data->list);
1472 kfree(data);
1473 }
1474
1475 return 0;
1476 }
1477
1478 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1479 u8 *randomizer)
1480 {
1481 struct oob_data *data;
1482
1483 data = hci_find_remote_oob_data(hdev, bdaddr);
1484
1485 if (!data) {
1486 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1487 if (!data)
1488 return -ENOMEM;
1489
1490 bacpy(&data->bdaddr, bdaddr);
1491 list_add(&data->list, &hdev->remote_oob_data);
1492 }
1493
1494 memcpy(data->hash, hash, sizeof(data->hash));
1495 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1496
1497 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1498
1499 return 0;
1500 }
1501
1502 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1503 {
1504 struct bdaddr_list *b;
1505
1506 list_for_each_entry(b, &hdev->blacklist, list)
1507 if (bacmp(bdaddr, &b->bdaddr) == 0)
1508 return b;
1509
1510 return NULL;
1511 }
1512
1513 int hci_blacklist_clear(struct hci_dev *hdev)
1514 {
1515 struct list_head *p, *n;
1516
1517 list_for_each_safe(p, n, &hdev->blacklist) {
1518 struct bdaddr_list *b;
1519
1520 b = list_entry(p, struct bdaddr_list, list);
1521
1522 list_del(p);
1523 kfree(b);
1524 }
1525
1526 return 0;
1527 }
1528
1529 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1530 {
1531 struct bdaddr_list *entry;
1532
1533 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1534 return -EBADF;
1535
1536 if (hci_blacklist_lookup(hdev, bdaddr))
1537 return -EEXIST;
1538
1539 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1540 if (!entry)
1541 return -ENOMEM;
1542
1543 bacpy(&entry->bdaddr, bdaddr);
1544
1545 list_add(&entry->list, &hdev->blacklist);
1546
1547 return mgmt_device_blocked(hdev, bdaddr, type);
1548 }
1549
1550 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1551 {
1552 struct bdaddr_list *entry;
1553
1554 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1555 return hci_blacklist_clear(hdev);
1556
1557 entry = hci_blacklist_lookup(hdev, bdaddr);
1558 if (!entry)
1559 return -ENOENT;
1560
1561 list_del(&entry->list);
1562 kfree(entry);
1563
1564 return mgmt_device_unblocked(hdev, bdaddr, type);
1565 }
1566
1567 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1568 {
1569 struct le_scan_params *param = (struct le_scan_params *) opt;
1570 struct hci_cp_le_set_scan_param cp;
1571
1572 memset(&cp, 0, sizeof(cp));
1573 cp.type = param->type;
1574 cp.interval = cpu_to_le16(param->interval);
1575 cp.window = cpu_to_le16(param->window);
1576
1577 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1578 }
1579
1580 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1581 {
1582 struct hci_cp_le_set_scan_enable cp;
1583
1584 memset(&cp, 0, sizeof(cp));
1585 cp.enable = 1;
1586 cp.filter_dup = 1;
1587
1588 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1589 }
1590
1591 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1592 u16 window, int timeout)
1593 {
1594 long timeo = msecs_to_jiffies(3000);
1595 struct le_scan_params param;
1596 int err;
1597
1598 BT_DBG("%s", hdev->name);
1599
1600 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1601 return -EINPROGRESS;
1602
1603 param.type = type;
1604 param.interval = interval;
1605 param.window = window;
1606
1607 hci_req_lock(hdev);
1608
1609 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1610 timeo);
1611 if (!err)
1612 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1613
1614 hci_req_unlock(hdev);
1615
1616 if (err < 0)
1617 return err;
1618
1619 schedule_delayed_work(&hdev->le_scan_disable,
1620 msecs_to_jiffies(timeout));
1621
1622 return 0;
1623 }
1624
1625 int hci_cancel_le_scan(struct hci_dev *hdev)
1626 {
1627 BT_DBG("%s", hdev->name);
1628
1629 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1630 return -EALREADY;
1631
1632 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1633 struct hci_cp_le_set_scan_enable cp;
1634
1635 /* Send HCI command to disable LE Scan */
1636 memset(&cp, 0, sizeof(cp));
1637 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1638 }
1639
1640 return 0;
1641 }
1642
1643 static void le_scan_disable_work(struct work_struct *work)
1644 {
1645 struct hci_dev *hdev = container_of(work, struct hci_dev,
1646 le_scan_disable.work);
1647 struct hci_cp_le_set_scan_enable cp;
1648
1649 BT_DBG("%s", hdev->name);
1650
1651 memset(&cp, 0, sizeof(cp));
1652
1653 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1654 }
1655
1656 static void le_scan_work(struct work_struct *work)
1657 {
1658 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1659 struct le_scan_params *param = &hdev->le_scan_params;
1660
1661 BT_DBG("%s", hdev->name);
1662
1663 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1664 param->timeout);
1665 }
1666
1667 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1668 int timeout)
1669 {
1670 struct le_scan_params *param = &hdev->le_scan_params;
1671
1672 BT_DBG("%s", hdev->name);
1673
1674 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1675 return -ENOTSUPP;
1676
1677 if (work_busy(&hdev->le_scan))
1678 return -EINPROGRESS;
1679
1680 param->type = type;
1681 param->interval = interval;
1682 param->window = window;
1683 param->timeout = timeout;
1684
1685 queue_work(system_long_wq, &hdev->le_scan);
1686
1687 return 0;
1688 }
1689
1690 /* Alloc HCI device */
1691 struct hci_dev *hci_alloc_dev(void)
1692 {
1693 struct hci_dev *hdev;
1694
1695 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1696 if (!hdev)
1697 return NULL;
1698
1699 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1700 hdev->esco_type = (ESCO_HV1);
1701 hdev->link_mode = (HCI_LM_ACCEPT);
1702 hdev->io_capability = 0x03; /* No Input No Output */
1703 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1704 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1705
1706 hdev->sniff_max_interval = 800;
1707 hdev->sniff_min_interval = 80;
1708
1709 mutex_init(&hdev->lock);
1710 mutex_init(&hdev->req_lock);
1711
1712 INIT_LIST_HEAD(&hdev->mgmt_pending);
1713 INIT_LIST_HEAD(&hdev->blacklist);
1714 INIT_LIST_HEAD(&hdev->uuids);
1715 INIT_LIST_HEAD(&hdev->link_keys);
1716 INIT_LIST_HEAD(&hdev->long_term_keys);
1717 INIT_LIST_HEAD(&hdev->remote_oob_data);
1718 INIT_LIST_HEAD(&hdev->conn_hash.list);
1719
1720 INIT_WORK(&hdev->rx_work, hci_rx_work);
1721 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1722 INIT_WORK(&hdev->tx_work, hci_tx_work);
1723 INIT_WORK(&hdev->power_on, hci_power_on);
1724 INIT_WORK(&hdev->le_scan, le_scan_work);
1725
1726 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1727 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1728 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1729
1730 skb_queue_head_init(&hdev->driver_init);
1731 skb_queue_head_init(&hdev->rx_q);
1732 skb_queue_head_init(&hdev->cmd_q);
1733 skb_queue_head_init(&hdev->raw_q);
1734
1735 init_waitqueue_head(&hdev->req_wait_q);
1736
1737 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1738
1739 hci_init_sysfs(hdev);
1740 discovery_init(hdev);
1741
1742 return hdev;
1743 }
1744 EXPORT_SYMBOL(hci_alloc_dev);
1745
1746 /* Free HCI device */
1747 void hci_free_dev(struct hci_dev *hdev)
1748 {
1749 skb_queue_purge(&hdev->driver_init);
1750
1751 	/* will be freed via the device release callback */
1752 put_device(&hdev->dev);
1753 }
1754 EXPORT_SYMBOL(hci_free_dev);
1755
1756 /* Register HCI device */
1757 int hci_register_dev(struct hci_dev *hdev)
1758 {
1759 int id, error;
1760
1761 if (!hdev->open || !hdev->close)
1762 return -EINVAL;
1763
1764 /* Do not allow HCI_AMP devices to register at index 0,
1765 * so the index can be used as the AMP controller ID.
1766 */
1767 switch (hdev->dev_type) {
1768 case HCI_BREDR:
1769 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1770 break;
1771 case HCI_AMP:
1772 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1773 break;
1774 default:
1775 return -EINVAL;
1776 }
1777
1778 if (id < 0)
1779 return id;
1780
1781 sprintf(hdev->name, "hci%d", id);
1782 hdev->id = id;
1783
1784 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1785
1786 write_lock(&hci_dev_list_lock);
1787 list_add(&hdev->list, &hci_dev_list);
1788 write_unlock(&hci_dev_list_lock);
1789
1790 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1791 WQ_MEM_RECLAIM, 1);
1792 if (!hdev->workqueue) {
1793 error = -ENOMEM;
1794 goto err;
1795 }
1796
1797 error = hci_add_sysfs(hdev);
1798 if (error < 0)
1799 goto err_wqueue;
1800
1801 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1802 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1803 hdev);
1804 if (hdev->rfkill) {
1805 if (rfkill_register(hdev->rfkill) < 0) {
1806 rfkill_destroy(hdev->rfkill);
1807 hdev->rfkill = NULL;
1808 }
1809 }
1810
1811 set_bit(HCI_SETUP, &hdev->dev_flags);
1812
1813 if (hdev->dev_type != HCI_AMP)
1814 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1815
1816 schedule_work(&hdev->power_on);
1817
1818 hci_notify(hdev, HCI_DEV_REG);
1819 hci_dev_hold(hdev);
1820
1821 return id;
1822
1823 err_wqueue:
1824 destroy_workqueue(hdev->workqueue);
1825 err:
1826 ida_simple_remove(&hci_index_ida, hdev->id);
1827 write_lock(&hci_dev_list_lock);
1828 list_del(&hdev->list);
1829 write_unlock(&hci_dev_list_lock);
1830
1831 return error;
1832 }
1833 EXPORT_SYMBOL(hci_register_dev);
1834
1835 /* Unregister HCI device */
1836 void hci_unregister_dev(struct hci_dev *hdev)
1837 {
1838 int i, id;
1839
1840 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1841
1842 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1843
1844 id = hdev->id;
1845
1846 write_lock(&hci_dev_list_lock);
1847 list_del(&hdev->list);
1848 write_unlock(&hci_dev_list_lock);
1849
1850 hci_dev_do_close(hdev);
1851
1852 for (i = 0; i < NUM_REASSEMBLY; i++)
1853 kfree_skb(hdev->reassembly[i]);
1854
1855 if (!test_bit(HCI_INIT, &hdev->flags) &&
1856 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1857 hci_dev_lock(hdev);
1858 mgmt_index_removed(hdev);
1859 hci_dev_unlock(hdev);
1860 }
1861
1862 /* mgmt_index_removed should take care of emptying the
1863 * pending list */
1864 BUG_ON(!list_empty(&hdev->mgmt_pending));
1865
1866 hci_notify(hdev, HCI_DEV_UNREG);
1867
1868 if (hdev->rfkill) {
1869 rfkill_unregister(hdev->rfkill);
1870 rfkill_destroy(hdev->rfkill);
1871 }
1872
1873 hci_del_sysfs(hdev);
1874
1875 destroy_workqueue(hdev->workqueue);
1876
1877 hci_dev_lock(hdev);
1878 hci_blacklist_clear(hdev);
1879 hci_uuids_clear(hdev);
1880 hci_link_keys_clear(hdev);
1881 hci_smp_ltks_clear(hdev);
1882 hci_remote_oob_data_clear(hdev);
1883 hci_dev_unlock(hdev);
1884
1885 hci_dev_put(hdev);
1886
1887 ida_simple_remove(&hci_index_ida, id);
1888 }
1889 EXPORT_SYMBOL(hci_unregister_dev);
1890
1891 /* Suspend HCI device */
1892 int hci_suspend_dev(struct hci_dev *hdev)
1893 {
1894 hci_notify(hdev, HCI_DEV_SUSPEND);
1895 return 0;
1896 }
1897 EXPORT_SYMBOL(hci_suspend_dev);
1898
1899 /* Resume HCI device */
1900 int hci_resume_dev(struct hci_dev *hdev)
1901 {
1902 hci_notify(hdev, HCI_DEV_RESUME);
1903 return 0;
1904 }
1905 EXPORT_SYMBOL(hci_resume_dev);
1906
1907 /* Receive frame from HCI drivers */
1908 int hci_recv_frame(struct sk_buff *skb)
1909 {
1910 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1911 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1912 && !test_bit(HCI_INIT, &hdev->flags))) {
1913 kfree_skb(skb);
1914 return -ENXIO;
1915 }
1916
1917 	/* Incoming skb */
1918 bt_cb(skb)->incoming = 1;
1919
1920 /* Time stamp */
1921 __net_timestamp(skb);
1922
1923 skb_queue_tail(&hdev->rx_q, skb);
1924 queue_work(hdev->workqueue, &hdev->rx_work);
1925
1926 return 0;
1927 }
1928 EXPORT_SYMBOL(hci_recv_frame);
1929
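/* Append up to @count bytes to the partially reassembled packet in
 * hdev->reassembly[index], allocating a fresh skb on the first fragment.
 * Once the header announces the payload length and all of it has arrived,
 * the complete frame is handed to hci_recv_frame().  Returns the number
 * of input bytes left unconsumed, or a negative error.
 */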
1930 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1931 int count, __u8 index)
1932 {
1933 int len = 0;
1934 int hlen = 0;
1935 int remain = count;
1936 struct sk_buff *skb;
1937 struct bt_skb_cb *scb;
1938
1939 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1940 index >= NUM_REASSEMBLY)
1941 return -EILSEQ;
1942
1943 skb = hdev->reassembly[index];
1944
1945 if (!skb) {
1946 switch (type) {
1947 case HCI_ACLDATA_PKT:
1948 len = HCI_MAX_FRAME_SIZE;
1949 hlen = HCI_ACL_HDR_SIZE;
1950 break;
1951 case HCI_EVENT_PKT:
1952 len = HCI_MAX_EVENT_SIZE;
1953 hlen = HCI_EVENT_HDR_SIZE;
1954 break;
1955 case HCI_SCODATA_PKT:
1956 len = HCI_MAX_SCO_SIZE;
1957 hlen = HCI_SCO_HDR_SIZE;
1958 break;
1959 }
1960
1961 skb = bt_skb_alloc(len, GFP_ATOMIC);
1962 if (!skb)
1963 return -ENOMEM;
1964
1965 scb = (void *) skb->cb;
1966 scb->expect = hlen;
1967 scb->pkt_type = type;
1968
1969 skb->dev = (void *) hdev;
1970 hdev->reassembly[index] = skb;
1971 }
1972
1973 while (count) {
1974 scb = (void *) skb->cb;
1975 len = min_t(uint, scb->expect, count);
1976
1977 memcpy(skb_put(skb, len), data, len);
1978
1979 count -= len;
1980 data += len;
1981 scb->expect -= len;
1982 remain = count;
1983
1984 switch (type) {
1985 case HCI_EVENT_PKT:
1986 if (skb->len == HCI_EVENT_HDR_SIZE) {
1987 struct hci_event_hdr *h = hci_event_hdr(skb);
1988 scb->expect = h->plen;
1989
1990 if (skb_tailroom(skb) < scb->expect) {
1991 kfree_skb(skb);
1992 hdev->reassembly[index] = NULL;
1993 return -ENOMEM;
1994 }
1995 }
1996 break;
1997
1998 case HCI_ACLDATA_PKT:
1999 if (skb->len == HCI_ACL_HDR_SIZE) {
2000 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2001 scb->expect = __le16_to_cpu(h->dlen);
2002
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2007 }
2008 }
2009 break;
2010
2011 case HCI_SCODATA_PKT:
2012 if (skb->len == HCI_SCO_HDR_SIZE) {
2013 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2014 scb->expect = h->dlen;
2015
2016 if (skb_tailroom(skb) < scb->expect) {
2017 kfree_skb(skb);
2018 hdev->reassembly[index] = NULL;
2019 return -ENOMEM;
2020 }
2021 }
2022 break;
2023 }
2024
2025 if (scb->expect == 0) {
2026 /* Complete frame */
2027
2028 bt_cb(skb)->pkt_type = type;
2029 hci_recv_frame(skb);
2030
2031 hdev->reassembly[index] = NULL;
2032 return remain;
2033 }
2034 }
2035
2036 return remain;
2037 }
2038
2039 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2040 {
2041 int rem = 0;
2042
2043 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2044 return -EILSEQ;
2045
2046 while (count) {
2047 rem = hci_reassembly(hdev, type, data, count, type - 1);
2048 if (rem < 0)
2049 return rem;
2050
2051 data += (count - rem);
2052 count = rem;
2053 }
2054
2055 return rem;
2056 }
2057 EXPORT_SYMBOL(hci_recv_fragment);
2058
2059 #define STREAM_REASSEMBLY 0
2060
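/* Reassemble HCI packets from a continuous byte stream: the first byte of
 * each frame carries the packet type, the remainder is fed through
 * hci_reassembly() using the single STREAM_REASSEMBLY slot.
 */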
2061 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2062 {
2063 int type;
2064 int rem = 0;
2065
2066 while (count) {
2067 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2068
2069 if (!skb) {
2070 struct { char type; } *pkt;
2071
2072 /* Start of the frame */
2073 pkt = data;
2074 type = pkt->type;
2075
2076 data++;
2077 count--;
2078 } else
2079 type = bt_cb(skb)->pkt_type;
2080
2081 rem = hci_reassembly(hdev, type, data, count,
2082 STREAM_REASSEMBLY);
2083 if (rem < 0)
2084 return rem;
2085
2086 data += (count - rem);
2087 count = rem;
2088 }
2089
2090 return rem;
2091 }
2092 EXPORT_SYMBOL(hci_recv_stream_fragment);
2093
2094 /* ---- Interface to upper protocols ---- */
2095
2096 int hci_register_cb(struct hci_cb *cb)
2097 {
2098 BT_DBG("%p name %s", cb, cb->name);
2099
2100 write_lock(&hci_cb_list_lock);
2101 list_add(&cb->list, &hci_cb_list);
2102 write_unlock(&hci_cb_list_lock);
2103
2104 return 0;
2105 }
2106 EXPORT_SYMBOL(hci_register_cb);
2107
2108 int hci_unregister_cb(struct hci_cb *cb)
2109 {
2110 BT_DBG("%p name %s", cb, cb->name);
2111
2112 write_lock(&hci_cb_list_lock);
2113 list_del(&cb->list);
2114 write_unlock(&hci_cb_list_lock);
2115
2116 return 0;
2117 }
2118 EXPORT_SYMBOL(hci_unregister_cb);
2119
2120 static int hci_send_frame(struct sk_buff *skb)
2121 {
2122 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2123
2124 if (!hdev) {
2125 kfree_skb(skb);
2126 return -ENODEV;
2127 }
2128
2129 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2130
2131 /* Time stamp */
2132 __net_timestamp(skb);
2133
2134 /* Send copy to monitor */
2135 hci_send_to_monitor(hdev, skb);
2136
2137 if (atomic_read(&hdev->promisc)) {
2138 /* Send copy to the sockets */
2139 hci_send_to_sock(hdev, skb);
2140 }
2141
2142 	/* Get rid of the skb owner prior to sending to the driver. */
2143 skb_orphan(skb);
2144
2145 return hdev->send(skb);
2146 }
2147
2148 /* Send HCI command */
2149 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2150 {
2151 int len = HCI_COMMAND_HDR_SIZE + plen;
2152 struct hci_command_hdr *hdr;
2153 struct sk_buff *skb;
2154
2155 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2156
2157 skb = bt_skb_alloc(len, GFP_ATOMIC);
2158 if (!skb) {
2159 BT_ERR("%s no memory for command", hdev->name);
2160 return -ENOMEM;
2161 }
2162
2163 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2164 hdr->opcode = cpu_to_le16(opcode);
2165 hdr->plen = plen;
2166
2167 if (plen)
2168 memcpy(skb_put(skb, plen), param, plen);
2169
2170 BT_DBG("skb len %d", skb->len);
2171
2172 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2173 skb->dev = (void *) hdev;
2174
2175 if (test_bit(HCI_INIT, &hdev->flags))
2176 hdev->init_last_cmd = opcode;
2177
2178 skb_queue_tail(&hdev->cmd_q, skb);
2179 queue_work(hdev->workqueue, &hdev->cmd_work);
2180
2181 return 0;
2182 }
2183
2184 /* Get data from the previously sent command */
2185 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2186 {
2187 struct hci_command_hdr *hdr;
2188
2189 if (!hdev->sent_cmd)
2190 return NULL;
2191
2192 hdr = (void *) hdev->sent_cmd->data;
2193
2194 if (hdr->opcode != cpu_to_le16(opcode))
2195 return NULL;
2196
2197 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2198
2199 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2200 }
2201
2202 /* Send ACL data */
2203 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2204 {
2205 struct hci_acl_hdr *hdr;
2206 int len = skb->len;
2207
2208 skb_push(skb, HCI_ACL_HDR_SIZE);
2209 skb_reset_transport_header(skb);
2210 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2211 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2212 hdr->dlen = cpu_to_le16(len);
2213 }
2214
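/* Prepend the ACL header and queue @skb on @queue.  For a fragmented skb
 * the head keeps the flags passed by the caller (ACL_START) while every
 * entry on frag_list is tagged ACL_CONT, and all fragments are queued
 * atomically under the queue lock.
 */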
2215 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2216 struct sk_buff *skb, __u16 flags)
2217 {
2218 struct hci_conn *conn = chan->conn;
2219 struct hci_dev *hdev = conn->hdev;
2220 struct sk_buff *list;
2221
2222 skb->len = skb_headlen(skb);
2223 skb->data_len = 0;
2224
2225 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2226
2227 switch (hdev->dev_type) {
2228 case HCI_BREDR:
2229 hci_add_acl_hdr(skb, conn->handle, flags);
2230 break;
2231 case HCI_AMP:
2232 hci_add_acl_hdr(skb, chan->handle, flags);
2233 break;
2234 default:
2235 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2236 return;
2237 }
2238
2239 list = skb_shinfo(skb)->frag_list;
2240 if (!list) {
2241 		/* Non-fragmented */
2242 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2243
2244 skb_queue_tail(queue, skb);
2245 } else {
2246 /* Fragmented */
2247 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2248
2249 skb_shinfo(skb)->frag_list = NULL;
2250
2251 /* Queue all fragments atomically */
2252 spin_lock(&queue->lock);
2253
2254 __skb_queue_tail(queue, skb);
2255
2256 flags &= ~ACL_START;
2257 flags |= ACL_CONT;
2258 do {
2259 skb = list; list = list->next;
2260
2261 skb->dev = (void *) hdev;
2262 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2263 hci_add_acl_hdr(skb, conn->handle, flags);
2264
2265 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2266
2267 __skb_queue_tail(queue, skb);
2268 } while (list);
2269
2270 spin_unlock(&queue->lock);
2271 }
2272 }
2273
2274 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2275 {
2276 struct hci_dev *hdev = chan->conn->hdev;
2277
2278 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2279
2280 skb->dev = (void *) hdev;
2281
2282 hci_queue_acl(chan, &chan->data_q, skb, flags);
2283
2284 queue_work(hdev->workqueue, &hdev->tx_work);
2285 }
2286
2287 /* Send SCO data */
2288 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2289 {
2290 struct hci_dev *hdev = conn->hdev;
2291 struct hci_sco_hdr hdr;
2292
2293 BT_DBG("%s len %d", hdev->name, skb->len);
2294
2295 hdr.handle = cpu_to_le16(conn->handle);
2296 hdr.dlen = skb->len;
2297
2298 skb_push(skb, HCI_SCO_HDR_SIZE);
2299 skb_reset_transport_header(skb);
2300 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2301
2302 skb->dev = (void *) hdev;
2303 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2304
2305 skb_queue_tail(&conn->data_q, skb);
2306 queue_work(hdev->workqueue, &hdev->tx_work);
2307 }
2308
2309 /* ---- HCI TX task (outgoing data) ---- */
2310
2311 /* HCI Connection scheduler */
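/* Pick the connection of @type with packets queued and the fewest packets
 * already in flight, and derive a fair per-connection quota from the
 * controller's free buffer count (always at least one packet).
 */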
2312 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2313 int *quote)
2314 {
2315 struct hci_conn_hash *h = &hdev->conn_hash;
2316 struct hci_conn *conn = NULL, *c;
2317 unsigned int num = 0, min = ~0;
2318
2319 /* We don't have to lock the device here. Connections are always
2320 * added and removed with the TX task disabled. */

2321
2322 rcu_read_lock();
2323
2324 list_for_each_entry_rcu(c, &h->list, list) {
2325 if (c->type != type || skb_queue_empty(&c->data_q))
2326 continue;
2327
2328 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2329 continue;
2330
2331 num++;
2332
2333 if (c->sent < min) {
2334 min = c->sent;
2335 conn = c;
2336 }
2337
2338 if (hci_conn_num(hdev, type) == num)
2339 break;
2340 }
2341
2342 rcu_read_unlock();
2343
2344 if (conn) {
2345 int cnt, q;
2346
2347 switch (conn->type) {
2348 case ACL_LINK:
2349 cnt = hdev->acl_cnt;
2350 break;
2351 case SCO_LINK:
2352 case ESCO_LINK:
2353 cnt = hdev->sco_cnt;
2354 break;
2355 case LE_LINK:
2356 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2357 break;
2358 default:
2359 cnt = 0;
2360 BT_ERR("Unknown link type");
2361 }
2362
2363 q = cnt / num;
2364 *quote = q ? q : 1;
2365 } else
2366 *quote = 0;
2367
2368 BT_DBG("conn %p quote %d", conn, *quote);
2369 return conn;
2370 }
2371
2372 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2373 {
2374 struct hci_conn_hash *h = &hdev->conn_hash;
2375 struct hci_conn *c;
2376
2377 BT_ERR("%s link tx timeout", hdev->name);
2378
2379 rcu_read_lock();
2380
2381 /* Kill stalled connections */
2382 list_for_each_entry_rcu(c, &h->list, list) {
2383 if (c->type == type && c->sent) {
2384 BT_ERR("%s killing stalled connection %pMR",
2385 hdev->name, &c->dst);
2386 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2387 }
2388 }
2389
2390 rcu_read_unlock();
2391 }
2392
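/* Channel-level scheduler used for ACL, AMP and LE traffic. Only channels
 * whose head skb is at the current highest priority are considered; among
 * those, the channel on the connection with the fewest outstanding packets
 * wins. The quota is the free buffer (or block) count divided by the number
 * of competing channels at that priority.
 */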
2393 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2394 int *quote)
2395 {
2396 struct hci_conn_hash *h = &hdev->conn_hash;
2397 struct hci_chan *chan = NULL;
2398 unsigned int num = 0, min = ~0, cur_prio = 0;
2399 struct hci_conn *conn;
2400 int cnt, q, conn_num = 0;
2401
2402 BT_DBG("%s", hdev->name);
2403
2404 rcu_read_lock();
2405
2406 list_for_each_entry_rcu(conn, &h->list, list) {
2407 struct hci_chan *tmp;
2408
2409 if (conn->type != type)
2410 continue;
2411
2412 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2413 continue;
2414
2415 conn_num++;
2416
2417 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2418 struct sk_buff *skb;
2419
2420 if (skb_queue_empty(&tmp->data_q))
2421 continue;
2422
2423 skb = skb_peek(&tmp->data_q);
2424 if (skb->priority < cur_prio)
2425 continue;
2426
2427 if (skb->priority > cur_prio) {
2428 num = 0;
2429 min = ~0;
2430 cur_prio = skb->priority;
2431 }
2432
2433 num++;
2434
2435 if (conn->sent < min) {
2436 min = conn->sent;
2437 chan = tmp;
2438 }
2439 }
2440
2441 if (hci_conn_num(hdev, type) == conn_num)
2442 break;
2443 }
2444
2445 rcu_read_unlock();
2446
2447 if (!chan)
2448 return NULL;
2449
2450 switch (chan->conn->type) {
2451 case ACL_LINK:
2452 cnt = hdev->acl_cnt;
2453 break;
2454 case AMP_LINK:
2455 cnt = hdev->block_cnt;
2456 break;
2457 case SCO_LINK:
2458 case ESCO_LINK:
2459 cnt = hdev->sco_cnt;
2460 break;
2461 case LE_LINK:
2462 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2463 break;
2464 default:
2465 cnt = 0;
2466 BT_ERR("Unknown link type");
2467 }
2468
2469 q = cnt / num;
2470 *quote = q ? q : 1;
2471 BT_DBG("chan %p quote %d", chan, *quote);
2472 return chan;
2473 }
2474
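/* Called after a scheduling round that actually sent data. Channels that
 * transmitted get their sent counter reset; channels that were skipped but
 * still have data queued get their head skb promoted to HCI_PRIO_MAX - 1 so
 * low-priority traffic cannot be starved indefinitely.
 */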
2475 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2476 {
2477 struct hci_conn_hash *h = &hdev->conn_hash;
2478 struct hci_conn *conn;
2479 int num = 0;
2480
2481 BT_DBG("%s", hdev->name);
2482
2483 rcu_read_lock();
2484
2485 list_for_each_entry_rcu(conn, &h->list, list) {
2486 struct hci_chan *chan;
2487
2488 if (conn->type != type)
2489 continue;
2490
2491 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2492 continue;
2493
2494 num++;
2495
2496 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2497 struct sk_buff *skb;
2498
2499 if (chan->sent) {
2500 chan->sent = 0;
2501 continue;
2502 }
2503
2504 if (skb_queue_empty(&chan->data_q))
2505 continue;
2506
2507 skb = skb_peek(&chan->data_q);
2508 if (skb->priority >= HCI_PRIO_MAX - 1)
2509 continue;
2510
2511 skb->priority = HCI_PRIO_MAX - 1;
2512
2513 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2514 skb->priority);
2515 }
2516
2517 if (hci_conn_num(hdev, type) == num)
2518 break;
2519 }
2520
2521 rcu_read_unlock();
2522
2523 }
2524
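/* With block-based flow control (AMP) the controller accounts for data in
 * fixed-size blocks rather than packets: the payload (skb->len minus the
 * ACL header) divided by block_len, rounded up, is charged against
 * block_cnt.
 */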
2525 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2526 {
2527 /* Calculate count of blocks used by this packet */
2528 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2529 }
2530
2531 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2532 {
2533 if (!test_bit(HCI_RAW, &hdev->flags)) {
2534 /* ACL tx timeout must be longer than the maximum
2535 * link supervision timeout (40.9 seconds) */
2536 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2537 HCI_ACL_TX_TIMEOUT))
2538 hci_link_tx_to(hdev, ACL_LINK);
2539 }
2540 }
2541
2542 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2543 {
2544 unsigned int cnt = hdev->acl_cnt;
2545 struct hci_chan *chan;
2546 struct sk_buff *skb;
2547 int quote;
2548
2549 __check_timeout(hdev, cnt);
2550
2551 while (hdev->acl_cnt &&
2552 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2553 u32 priority = (skb_peek(&chan->data_q))->priority;
2554 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2555 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2556 skb->len, skb->priority);
2557
2558 /* Stop if priority has changed */
2559 if (skb->priority < priority)
2560 break;
2561
2562 skb = skb_dequeue(&chan->data_q);
2563
2564 hci_conn_enter_active_mode(chan->conn,
2565 bt_cb(skb)->force_active);
2566
2567 hci_send_frame(skb);
2568 hdev->acl_last_tx = jiffies;
2569
2570 hdev->acl_cnt--;
2571 chan->sent++;
2572 chan->conn->sent++;
2573 }
2574 }
2575
2576 if (cnt != hdev->acl_cnt)
2577 hci_prio_recalculate(hdev, ACL_LINK);
2578 }
2579
2580 static void hci_sched_acl_blk(struct hci_dev *hdev)
2581 {
2582 unsigned int cnt = hdev->block_cnt;
2583 struct hci_chan *chan;
2584 struct sk_buff *skb;
2585 int quote;
2586 u8 type;
2587
2588 __check_timeout(hdev, cnt);
2589
2590 BT_DBG("%s", hdev->name);
2591
2592 if (hdev->dev_type == HCI_AMP)
2593 type = AMP_LINK;
2594 else
2595 type = ACL_LINK;
2596
2597 while (hdev->block_cnt > 0 &&
2598 (chan = hci_chan_sent(hdev, type, &quote))) {
2599 u32 priority = (skb_peek(&chan->data_q))->priority;
2600 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2601 int blocks;
2602
2603 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2604 skb->len, skb->priority);
2605
2606 /* Stop if priority has changed */
2607 if (skb->priority < priority)
2608 break;
2609
2610 skb = skb_dequeue(&chan->data_q);
2611
2612 blocks = __get_blocks(hdev, skb);
2613 if (blocks > hdev->block_cnt)
2614 return;
2615
2616 hci_conn_enter_active_mode(chan->conn,
2617 bt_cb(skb)->force_active);
2618
2619 hci_send_frame(skb);
2620 hdev->acl_last_tx = jiffies;
2621
2622 hdev->block_cnt -= blocks;
2623 quote -= blocks;
2624
2625 chan->sent += blocks;
2626 chan->conn->sent += blocks;
2627 }
2628 }
2629
2630 if (cnt != hdev->block_cnt)
2631 hci_prio_recalculate(hdev, type);
2632 }
2633
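/* ACL scheduling entry point: dispatch to packet-based or block-based
 * scheduling depending on the flow control mode the controller uses.
 */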
2634 static void hci_sched_acl(struct hci_dev *hdev)
2635 {
2636 BT_DBG("%s", hdev->name);
2637
2638 /* No ACL link over BR/EDR controller */
2639 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2640 return;
2641
2642 /* No AMP link over AMP controller */
2643 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2644 return;
2645
2646 switch (hdev->flow_ctl_mode) {
2647 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2648 hci_sched_acl_pkt(hdev);
2649 break;
2650
2651 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2652 hci_sched_acl_blk(hdev);
2653 break;
2654 }
2655 }
2656
2657 /* Schedule SCO */
2658 static void hci_sched_sco(struct hci_dev *hdev)
2659 {
2660 struct hci_conn *conn;
2661 struct sk_buff *skb;
2662 int quote;
2663
2664 BT_DBG("%s", hdev->name);
2665
2666 if (!hci_conn_num(hdev, SCO_LINK))
2667 return;
2668
2669 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671 BT_DBG("skb %p len %d", skb, skb->len);
2672 hci_send_frame(skb);
2673
2674 conn->sent++;
2675 if (conn->sent == ~0)
2676 conn->sent = 0;
2677 }
2678 }
2679 }
2680
2681 static void hci_sched_esco(struct hci_dev *hdev)
2682 {
2683 struct hci_conn *conn;
2684 struct sk_buff *skb;
2685 int quote;
2686
2687 BT_DBG("%s", hdev->name);
2688
2689 if (!hci_conn_num(hdev, ESCO_LINK))
2690 return;
2691
2692 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2693 &quote))) {
2694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703 }
2704
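/* LE scheduler. Controllers with a dedicated LE buffer pool (le_pkts > 0)
 * are accounted against le_cnt; otherwise LE traffic shares the ACL buffers
 * and is accounted against acl_cnt.
 */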
2705 static void hci_sched_le(struct hci_dev *hdev)
2706 {
2707 struct hci_chan *chan;
2708 struct sk_buff *skb;
2709 int quote, cnt, tmp;
2710
2711 BT_DBG("%s", hdev->name);
2712
2713 if (!hci_conn_num(hdev, LE_LINK))
2714 return;
2715
2716 if (!test_bit(HCI_RAW, &hdev->flags)) {
2717 /* LE tx timeout must be longer than the maximum
2718 * link supervision timeout (40.9 seconds) */
2719 if (!hdev->le_cnt && hdev->le_pkts &&
2720 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2721 hci_link_tx_to(hdev, LE_LINK);
2722 }
2723
2724 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2725 tmp = cnt;
2726 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2727 u32 priority = (skb_peek(&chan->data_q))->priority;
2728 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2729 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2730 skb->len, skb->priority);
2731
2732 /* Stop if priority has changed */
2733 if (skb->priority < priority)
2734 break;
2735
2736 skb = skb_dequeue(&chan->data_q);
2737
2738 hci_send_frame(skb);
2739 hdev->le_last_tx = jiffies;
2740
2741 cnt--;
2742 chan->sent++;
2743 chan->conn->sent++;
2744 }
2745 }
2746
2747 if (hdev->le_pkts)
2748 hdev->le_cnt = cnt;
2749 else
2750 hdev->acl_cnt = cnt;
2751
2752 if (cnt != tmp)
2753 hci_prio_recalculate(hdev, LE_LINK);
2754 }
2755
2756 static void hci_tx_work(struct work_struct *work)
2757 {
2758 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2759 struct sk_buff *skb;
2760
2761 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2762 hdev->sco_cnt, hdev->le_cnt);
2763
2764 /* Schedule queues and send pending frames to the HCI driver */
2765
2766 hci_sched_acl(hdev);
2767
2768 hci_sched_sco(hdev);
2769
2770 hci_sched_esco(hdev);
2771
2772 hci_sched_le(hdev);
2773
2774 /* Send any queued raw (unknown type) packets */
2775 while ((skb = skb_dequeue(&hdev->raw_q)))
2776 hci_send_frame(skb);
2777 }
2778
2779 /* ----- HCI RX task (incoming data processing) ----- */
2780
2781 /* ACL data packet */
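/* Incoming ACL frame: split the handle field back into the 12-bit
 * connection handle and the packet boundary/broadcast flags (hci_handle()
 * and hci_flags()), look up the owning hci_conn and hand the payload to
 * L2CAP; frames for unknown handles are dropped.
 */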
2782 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2783 {
2784 struct hci_acl_hdr *hdr = (void *) skb->data;
2785 struct hci_conn *conn;
2786 __u16 handle, flags;
2787
2788 skb_pull(skb, HCI_ACL_HDR_SIZE);
2789
2790 handle = __le16_to_cpu(hdr->handle);
2791 flags = hci_flags(handle);
2792 handle = hci_handle(handle);
2793
2794 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2795 handle, flags);
2796
2797 hdev->stat.acl_rx++;
2798
2799 hci_dev_lock(hdev);
2800 conn = hci_conn_hash_lookup_handle(hdev, handle);
2801 hci_dev_unlock(hdev);
2802
2803 if (conn) {
2804 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2805
2806 hci_dev_lock(hdev);
2807 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2808 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2809 mgmt_device_connected(hdev, &conn->dst, conn->type,
2810 conn->dst_type, 0, NULL, 0,
2811 conn->dev_class);
2812 hci_dev_unlock(hdev);
2813
2814 /* Send to upper protocol */
2815 l2cap_recv_acldata(conn, skb, flags);
2816 return;
2817 } else {
2818 BT_ERR("%s ACL packet for unknown connection handle %d",
2819 hdev->name, handle);
2820 }
2821
2822 kfree_skb(skb);
2823 }
2824
2825 /* SCO data packet */
2826 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2827 {
2828 struct hci_sco_hdr *hdr = (void *) skb->data;
2829 struct hci_conn *conn;
2830 __u16 handle;
2831
2832 skb_pull(skb, HCI_SCO_HDR_SIZE);
2833
2834 handle = __le16_to_cpu(hdr->handle);
2835
2836 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2837
2838 hdev->stat.sco_rx++;
2839
2840 hci_dev_lock(hdev);
2841 conn = hci_conn_hash_lookup_handle(hdev, handle);
2842 hci_dev_unlock(hdev);
2843
2844 if (conn) {
2845 /* Send to upper protocol */
2846 sco_recv_scodata(conn, skb);
2847 return;
2848 } else {
2849 BT_ERR("%s SCO packet for unknown connection handle %d",
2850 hdev->name, handle);
2851 }
2852
2853 kfree_skb(skb);
2854 }
2855
2856 static void hci_rx_work(struct work_struct *work)
2857 {
2858 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2859 struct sk_buff *skb;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 while ((skb = skb_dequeue(&hdev->rx_q))) {
2864 /* Send copy to monitor */
2865 hci_send_to_monitor(hdev, skb);
2866
2867 if (atomic_read(&hdev->promisc)) {
2868 /* Send copy to the sockets */
2869 hci_send_to_sock(hdev, skb);
2870 }
2871
2872 if (test_bit(HCI_RAW, &hdev->flags)) {
2873 kfree_skb(skb);
2874 continue;
2875 }
2876
2877 if (test_bit(HCI_INIT, &hdev->flags)) {
2878 /* Don't process data packets in this state. */
2879 switch (bt_cb(skb)->pkt_type) {
2880 case HCI_ACLDATA_PKT:
2881 case HCI_SCODATA_PKT:
2882 kfree_skb(skb);
2883 continue;
2884 }
2885 }
2886
2887 /* Process frame */
2888 switch (bt_cb(skb)->pkt_type) {
2889 case HCI_EVENT_PKT:
2890 BT_DBG("%s Event packet", hdev->name);
2891 hci_event_packet(hdev, skb);
2892 break;
2893
2894 case HCI_ACLDATA_PKT:
2895 BT_DBG("%s ACL data packet", hdev->name);
2896 hci_acldata_packet(hdev, skb);
2897 break;
2898
2899 case HCI_SCODATA_PKT:
2900 BT_DBG("%s SCO data packet", hdev->name);
2901 hci_scodata_packet(hdev, skb);
2902 break;
2903
2904 default:
2905 kfree_skb(skb);
2906 break;
2907 }
2908 }
2909 }
2910
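/* Command scheduler. cmd_cnt acts as a credit counter (typically one
 * outstanding command is allowed); a clone of the frame just sent is kept
 * in hdev->sent_cmd so hci_sent_cmd_data() can hand its parameters to the
 * matching event handler, and cmd_timer fires if the controller never
 * answers.
 */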
2911 static void hci_cmd_work(struct work_struct *work)
2912 {
2913 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2914 struct sk_buff *skb;
2915
2916 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2917 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2918
2919 /* Send queued commands */
2920 if (atomic_read(&hdev->cmd_cnt)) {
2921 skb = skb_dequeue(&hdev->cmd_q);
2922 if (!skb)
2923 return;
2924
2925 kfree_skb(hdev->sent_cmd);
2926
2927 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2928 if (hdev->sent_cmd) {
2929 atomic_dec(&hdev->cmd_cnt);
2930 hci_send_frame(skb);
2931 if (test_bit(HCI_RESET, &hdev->flags))
2932 del_timer(&hdev->cmd_timer);
2933 else
2934 mod_timer(&hdev->cmd_timer,
2935 jiffies + HCI_CMD_TIMEOUT);
2936 } else {
2937 skb_queue_head(&hdev->cmd_q, skb);
2938 queue_work(hdev->workqueue, &hdev->cmd_work);
2939 }
2940 }
2941 }
2942
2943 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2944 {
2945 /* General inquiry access code (GIAC) */
2946 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2947 struct hci_cp_inquiry cp;
2948
2949 BT_DBG("%s", hdev->name);
2950
2951 if (test_bit(HCI_INQUIRY, &hdev->flags))
2952 return -EINPROGRESS;
2953
2954 inquiry_cache_flush(hdev);
2955
2956 memset(&cp, 0, sizeof(cp));
2957 memcpy(&cp.lap, lap, sizeof(cp.lap));
2958 cp.length = length;
2959
2960 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2961 }
2962
2963 int hci_cancel_inquiry(struct hci_dev *hdev)
2964 {
2965 BT_DBG("%s", hdev->name);
2966
2967 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2968 return -EALREADY;
2969
2970 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2971 }
2972
2973 u8 bdaddr_to_le(u8 bdaddr_type)
2974 {
2975 switch (bdaddr_type) {
2976 case BDADDR_LE_PUBLIC:
2977 return ADDR_LE_DEV_PUBLIC;
2978
2979 default:
2980 /* Fall back to the LE Random address type */
2981 return ADDR_LE_DEV_RANDOM;
2982 }
2983 }