Bluetooth: Fix LE MTU reporting for HCIGETDEVINFO
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net/bluetooth/hci_core.c
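What the change does, as visible in hci_get_dev_info() further down: on a controller without BR/EDR support the acl_mtu/acl_pkts fields of struct hci_dev_info are now filled from the LE buffer parameters (le_mtu/le_pkts) and the SCO fields are zeroed, so HCIGETDEVINFO reports a usable MTU on LE-only controllers. A minimal user-space sketch, not part of this file, that reads those values through the HCIGETDEVINFO ioctl (it assumes the BlueZ <bluetooth/hci.h> headers and a controller at index 0):

/* Query HCIGETDEVINFO for hci0 and print the buffer/MTU fields. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_info di;
	int ctl;

	/* Raw HCI control socket used for the device ioctls */
	ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (ctl < 0) {
		perror("socket");
		return 1;
	}

	memset(&di, 0, sizeof(di));
	di.dev_id = 0;	/* hci0; adjust for other controllers */

	if (ioctl(ctl, HCIGETDEVINFO, (void *) &di) < 0) {
		perror("HCIGETDEVINFO");
		close(ctl);
		return 1;
	}

	/* With this change an LE-only controller reports its LE buffer
	 * size in the ACL fields. */
	printf("%s: acl_mtu %u acl_pkts %u sco_mtu %u sco_pkts %u\n",
	       di.name, di.acl_mtu, di.acl_pkts, di.sco_mtu, di.sco_pkts);

	close(ctl);
	return 0;
}

Built against the BlueZ development headers, this needs no extra libraries since it only issues an ioctl on a raw HCI socket.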
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63
64 /* If this is the init phase, check if the completed command matches
65 * the last init command; if not, handle the mismatch below and
66 * return without completing the request. */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96 }
97
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107 }
108
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
113 {
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
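/* The request callback issues the needed HCI commands; hci_req_complete()
 * records the result and wakes this waiter when the request finishes (or
 * hci_req_cancel()/the timeout ends the wait). */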
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
134 err = -bt_to_errno(hdev->req_result);
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
144 }
145
146 hdev->req_status = hdev->req_result = 0;
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151 }
152
153 static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
156 {
157 int ret;
158
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168 }
169
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171 {
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 }
178
179 static void bredr_init(struct hci_dev *hdev)
180 {
181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182
183 /* Read Local Supported Features */
184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
185
186 /* Read Local Version */
187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
188 }
189
190 static void amp_init(struct hci_dev *hdev)
191 {
192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193
194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
199
200 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
202 }
203
204 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
205 {
206 struct sk_buff *skb;
207
208 BT_DBG("%s %ld", hdev->name, opt);
209
210 /* Driver initialization */
211
212 /* Special commands */
213 while ((skb = skb_dequeue(&hdev->driver_init))) {
214 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 skb->dev = (void *) hdev;
216
217 skb_queue_tail(&hdev->cmd_q, skb);
218 queue_work(hdev->workqueue, &hdev->cmd_work);
219 }
220 skb_queue_purge(&hdev->driver_init);
221
222 /* Reset */
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 hci_reset_req(hdev, 0);
225
226 switch (hdev->dev_type) {
227 case HCI_BREDR:
228 bredr_init(hdev);
229 break;
230
231 case HCI_AMP:
232 amp_init(hdev);
233 break;
234
235 default:
236 BT_ERR("Unknown device type %d", hdev->dev_type);
237 break;
238 }
239 }
240
241 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242 {
243 __u8 scan = opt;
244
245 BT_DBG("%s %x", hdev->name, scan);
246
247 /* Inquiry and Page scans */
248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
249 }
250
251 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252 {
253 __u8 auth = opt;
254
255 BT_DBG("%s %x", hdev->name, auth);
256
257 /* Authentication */
258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
259 }
260
261 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262 {
263 __u8 encrypt = opt;
264
265 BT_DBG("%s %x", hdev->name, encrypt);
266
267 /* Encryption */
268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
269 }
270
271 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272 {
273 __le16 policy = cpu_to_le16(opt);
274
275 BT_DBG("%s %x", hdev->name, policy);
276
277 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279 }
280
281 /* Get HCI device by index.
282 * Device is held on return. */
283 struct hci_dev *hci_dev_get(int index)
284 {
285 struct hci_dev *hdev = NULL, *d;
286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
293 list_for_each_entry(d, &hci_dev_list, list) {
294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301 }
302
303 /* ---- Inquiry support ---- */
304
305 bool hci_discovery_active(struct hci_dev *hdev)
306 {
307 struct discovery_state *discov = &hdev->discovery;
308
309 switch (discov->state) {
310 case DISCOVERY_FINDING:
311 case DISCOVERY_RESOLVING:
312 return true;
313
314 default:
315 return false;
316 }
317 }
318
319 void hci_discovery_set_state(struct hci_dev *hdev, int state)
320 {
321 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322
323 if (hdev->discovery.state == state)
324 return;
325
326 switch (state) {
327 case DISCOVERY_STOPPED:
328 if (hdev->discovery.state != DISCOVERY_STARTING)
329 mgmt_discovering(hdev, 0);
330 break;
331 case DISCOVERY_STARTING:
332 break;
333 case DISCOVERY_FINDING:
334 mgmt_discovering(hdev, 1);
335 break;
336 case DISCOVERY_RESOLVING:
337 break;
338 case DISCOVERY_STOPPING:
339 break;
340 }
341
342 hdev->discovery.state = state;
343 }
344
345 static void inquiry_cache_flush(struct hci_dev *hdev)
346 {
347 struct discovery_state *cache = &hdev->discovery;
348 struct inquiry_entry *p, *n;
349
350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
352 kfree(p);
353 }
354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
357 }
358
359 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
361 {
362 struct discovery_state *cache = &hdev->discovery;
363 struct inquiry_entry *e;
364
365 BT_DBG("cache %p, %pMR", cache, bdaddr);
366
367 list_for_each_entry(e, &cache->all, all) {
368 if (!bacmp(&e->data.bdaddr, bdaddr))
369 return e;
370 }
371
372 return NULL;
373 }
374
375 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
376 bdaddr_t *bdaddr)
377 {
378 struct discovery_state *cache = &hdev->discovery;
379 struct inquiry_entry *e;
380
381 BT_DBG("cache %p, %pMR", cache, bdaddr);
382
383 list_for_each_entry(e, &cache->unknown, list) {
384 if (!bacmp(&e->data.bdaddr, bdaddr))
385 return e;
386 }
387
388 return NULL;
389 }
390
391 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
392 bdaddr_t *bdaddr,
393 int state)
394 {
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408 }
409
410 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
411 struct inquiry_entry *ie)
412 {
413 struct discovery_state *cache = &hdev->discovery;
414 struct list_head *pos = &cache->resolve;
415 struct inquiry_entry *p;
416
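/* Re-insert so the resolve list stays ordered by ascending |RSSI|
 * (strongest signal first); entries already pending name resolution
 * keep their place. */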
417 list_del(&ie->list);
418
419 list_for_each_entry(p, &cache->resolve, list) {
420 if (p->name_state != NAME_PENDING &&
421 abs(p->data.rssi) >= abs(ie->data.rssi))
422 break;
423 pos = &p->list;
424 }
425
426 list_add(&ie->list, pos);
427 }
428
429 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
430 bool name_known, bool *ssp)
431 {
432 struct discovery_state *cache = &hdev->discovery;
433 struct inquiry_entry *ie;
434
435 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
436
437 if (ssp)
438 *ssp = data->ssp_mode;
439
440 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
441 if (ie) {
442 if (ie->data.ssp_mode && ssp)
443 *ssp = true;
444
445 if (ie->name_state == NAME_NEEDED &&
446 data->rssi != ie->data.rssi) {
447 ie->data.rssi = data->rssi;
448 hci_inquiry_cache_update_resolve(hdev, ie);
449 }
450
451 goto update;
452 }
453
454 /* Entry not in the cache. Add new one. */
455 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
456 if (!ie)
457 return false;
458
459 list_add(&ie->all, &cache->all);
460
461 if (name_known) {
462 ie->name_state = NAME_KNOWN;
463 } else {
464 ie->name_state = NAME_NOT_KNOWN;
465 list_add(&ie->list, &cache->unknown);
466 }
467
468 update:
469 if (name_known && ie->name_state != NAME_KNOWN &&
470 ie->name_state != NAME_PENDING) {
471 ie->name_state = NAME_KNOWN;
472 list_del(&ie->list);
473 }
474
475 memcpy(&ie->data, data, sizeof(*data));
476 ie->timestamp = jiffies;
477 cache->timestamp = jiffies;
478
479 if (ie->name_state == NAME_NOT_KNOWN)
480 return false;
481
482 return true;
483 }
484
485 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
486 {
487 struct discovery_state *cache = &hdev->discovery;
488 struct inquiry_info *info = (struct inquiry_info *) buf;
489 struct inquiry_entry *e;
490 int copied = 0;
491
492 list_for_each_entry(e, &cache->all, all) {
493 struct inquiry_data *data = &e->data;
494
495 if (copied >= num)
496 break;
497
498 bacpy(&info->bdaddr, &data->bdaddr);
499 info->pscan_rep_mode = data->pscan_rep_mode;
500 info->pscan_period_mode = data->pscan_period_mode;
501 info->pscan_mode = data->pscan_mode;
502 memcpy(info->dev_class, data->dev_class, 3);
503 info->clock_offset = data->clock_offset;
504
505 info++;
506 copied++;
507 }
508
509 BT_DBG("cache %p, copied %d", cache, copied);
510 return copied;
511 }
512
513 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
514 {
515 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
516 struct hci_cp_inquiry cp;
517
518 BT_DBG("%s", hdev->name);
519
520 if (test_bit(HCI_INQUIRY, &hdev->flags))
521 return;
522
523 /* Start Inquiry */
524 memcpy(&cp.lap, &ir->lap, 3);
525 cp.length = ir->length;
526 cp.num_rsp = ir->num_rsp;
527 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
528 }
529
530 int hci_inquiry(void __user *arg)
531 {
532 __u8 __user *ptr = arg;
533 struct hci_inquiry_req ir;
534 struct hci_dev *hdev;
535 int err = 0, do_inquiry = 0, max_rsp;
536 long timeo;
537 __u8 *buf;
538
539 if (copy_from_user(&ir, ptr, sizeof(ir)))
540 return -EFAULT;
541
542 hdev = hci_dev_get(ir.dev_id);
543 if (!hdev)
544 return -ENODEV;
545
546 hci_dev_lock(hdev);
547 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
548 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
549 inquiry_cache_flush(hdev);
550 do_inquiry = 1;
551 }
552 hci_dev_unlock(hdev);
553
554 timeo = ir.length * msecs_to_jiffies(2000);
555
556 if (do_inquiry) {
557 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
558 if (err < 0)
559 goto done;
560 }
561
562 /* For an unlimited number of responses we will use a buffer with
563 * 255 entries.
564 */
565 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
566
567 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
568 * copy it to the user space.
569 */
570 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
571 if (!buf) {
572 err = -ENOMEM;
573 goto done;
574 }
575
576 hci_dev_lock(hdev);
577 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
578 hci_dev_unlock(hdev);
579
580 BT_DBG("num_rsp %d", ir.num_rsp);
581
582 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
583 ptr += sizeof(ir);
584 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
585 ir.num_rsp))
586 err = -EFAULT;
587 } else
588 err = -EFAULT;
589
590 kfree(buf);
591
592 done:
593 hci_dev_put(hdev);
594 return err;
595 }
596
597 /* ---- HCI ioctl helpers ---- */
598
599 int hci_dev_open(__u16 dev)
600 {
601 struct hci_dev *hdev;
602 int ret = 0;
603
604 hdev = hci_dev_get(dev);
605 if (!hdev)
606 return -ENODEV;
607
608 BT_DBG("%s %p", hdev->name, hdev);
609
610 hci_req_lock(hdev);
611
612 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
613 ret = -ENODEV;
614 goto done;
615 }
616
617 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
618 ret = -ERFKILL;
619 goto done;
620 }
621
622 if (test_bit(HCI_UP, &hdev->flags)) {
623 ret = -EALREADY;
624 goto done;
625 }
626
627 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
628 set_bit(HCI_RAW, &hdev->flags);
629
630 /* Treat all non-BR/EDR controllers as raw devices if
631 * enable_hs is not set */
632 if (hdev->dev_type != HCI_BREDR && !enable_hs)
633 set_bit(HCI_RAW, &hdev->flags);
634
635 if (hdev->open(hdev)) {
636 ret = -EIO;
637 goto done;
638 }
639
640 if (!test_bit(HCI_RAW, &hdev->flags)) {
641 atomic_set(&hdev->cmd_cnt, 1);
642 set_bit(HCI_INIT, &hdev->flags);
643 hdev->init_last_cmd = 0;
644
645 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
646
647 clear_bit(HCI_INIT, &hdev->flags);
648 }
649
650 if (!ret) {
651 hci_dev_hold(hdev);
652 set_bit(HCI_UP, &hdev->flags);
653 hci_notify(hdev, HCI_DEV_UP);
654 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
655 mgmt_valid_hdev(hdev)) {
656 hci_dev_lock(hdev);
657 mgmt_powered(hdev, 1);
658 hci_dev_unlock(hdev);
659 }
660 } else {
661 /* Init failed, cleanup */
662 flush_work(&hdev->tx_work);
663 flush_work(&hdev->cmd_work);
664 flush_work(&hdev->rx_work);
665
666 skb_queue_purge(&hdev->cmd_q);
667 skb_queue_purge(&hdev->rx_q);
668
669 if (hdev->flush)
670 hdev->flush(hdev);
671
672 if (hdev->sent_cmd) {
673 kfree_skb(hdev->sent_cmd);
674 hdev->sent_cmd = NULL;
675 }
676
677 hdev->close(hdev);
678 hdev->flags = 0;
679 }
680
681 done:
682 hci_req_unlock(hdev);
683 hci_dev_put(hdev);
684 return ret;
685 }
686
687 static int hci_dev_do_close(struct hci_dev *hdev)
688 {
689 BT_DBG("%s %p", hdev->name, hdev);
690
691 cancel_work_sync(&hdev->le_scan);
692
693 cancel_delayed_work(&hdev->power_off);
694
695 hci_req_cancel(hdev, ENODEV);
696 hci_req_lock(hdev);
697
698 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
699 del_timer_sync(&hdev->cmd_timer);
700 hci_req_unlock(hdev);
701 return 0;
702 }
703
704 /* Flush RX and TX works */
705 flush_work(&hdev->tx_work);
706 flush_work(&hdev->rx_work);
707
708 if (hdev->discov_timeout > 0) {
709 cancel_delayed_work(&hdev->discov_off);
710 hdev->discov_timeout = 0;
711 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
712 }
713
714 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
715 cancel_delayed_work(&hdev->service_cache);
716
717 cancel_delayed_work_sync(&hdev->le_scan_disable);
718
719 hci_dev_lock(hdev);
720 inquiry_cache_flush(hdev);
721 hci_conn_hash_flush(hdev);
722 hci_dev_unlock(hdev);
723
724 hci_notify(hdev, HCI_DEV_DOWN);
725
726 if (hdev->flush)
727 hdev->flush(hdev);
728
729 /* Reset device */
730 skb_queue_purge(&hdev->cmd_q);
731 atomic_set(&hdev->cmd_cnt, 1);
732 if (!test_bit(HCI_RAW, &hdev->flags) &&
733 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
734 set_bit(HCI_INIT, &hdev->flags);
735 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
736 clear_bit(HCI_INIT, &hdev->flags);
737 }
738
739 /* flush cmd work */
740 flush_work(&hdev->cmd_work);
741
742 /* Drop queues */
743 skb_queue_purge(&hdev->rx_q);
744 skb_queue_purge(&hdev->cmd_q);
745 skb_queue_purge(&hdev->raw_q);
746
747 /* Drop last sent command */
748 if (hdev->sent_cmd) {
749 del_timer_sync(&hdev->cmd_timer);
750 kfree_skb(hdev->sent_cmd);
751 hdev->sent_cmd = NULL;
752 }
753
754 /* After this point our queues are empty
755 * and no tasks are scheduled. */
756 hdev->close(hdev);
757
758 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
759 mgmt_valid_hdev(hdev)) {
760 hci_dev_lock(hdev);
761 mgmt_powered(hdev, 0);
762 hci_dev_unlock(hdev);
763 }
764
765 /* Clear flags */
766 hdev->flags = 0;
767
768 memset(hdev->eir, 0, sizeof(hdev->eir));
769 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
770
771 hci_req_unlock(hdev);
772
773 hci_dev_put(hdev);
774 return 0;
775 }
776
777 int hci_dev_close(__u16 dev)
778 {
779 struct hci_dev *hdev;
780 int err;
781
782 hdev = hci_dev_get(dev);
783 if (!hdev)
784 return -ENODEV;
785
786 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
787 cancel_delayed_work(&hdev->power_off);
788
789 err = hci_dev_do_close(hdev);
790
791 hci_dev_put(hdev);
792 return err;
793 }
794
795 int hci_dev_reset(__u16 dev)
796 {
797 struct hci_dev *hdev;
798 int ret = 0;
799
800 hdev = hci_dev_get(dev);
801 if (!hdev)
802 return -ENODEV;
803
804 hci_req_lock(hdev);
805
806 if (!test_bit(HCI_UP, &hdev->flags))
807 goto done;
808
809 /* Drop queues */
810 skb_queue_purge(&hdev->rx_q);
811 skb_queue_purge(&hdev->cmd_q);
812
813 hci_dev_lock(hdev);
814 inquiry_cache_flush(hdev);
815 hci_conn_hash_flush(hdev);
816 hci_dev_unlock(hdev);
817
818 if (hdev->flush)
819 hdev->flush(hdev);
820
821 atomic_set(&hdev->cmd_cnt, 1);
822 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
823
824 if (!test_bit(HCI_RAW, &hdev->flags))
825 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
826
827 done:
828 hci_req_unlock(hdev);
829 hci_dev_put(hdev);
830 return ret;
831 }
832
833 int hci_dev_reset_stat(__u16 dev)
834 {
835 struct hci_dev *hdev;
836 int ret = 0;
837
838 hdev = hci_dev_get(dev);
839 if (!hdev)
840 return -ENODEV;
841
842 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
843
844 hci_dev_put(hdev);
845
846 return ret;
847 }
848
849 int hci_dev_cmd(unsigned int cmd, void __user *arg)
850 {
851 struct hci_dev *hdev;
852 struct hci_dev_req dr;
853 int err = 0;
854
855 if (copy_from_user(&dr, arg, sizeof(dr)))
856 return -EFAULT;
857
858 hdev = hci_dev_get(dr.dev_id);
859 if (!hdev)
860 return -ENODEV;
861
862 switch (cmd) {
863 case HCISETAUTH:
864 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
865 HCI_INIT_TIMEOUT);
866 break;
867
868 case HCISETENCRYPT:
869 if (!lmp_encrypt_capable(hdev)) {
870 err = -EOPNOTSUPP;
871 break;
872 }
873
874 if (!test_bit(HCI_AUTH, &hdev->flags)) {
875 /* Auth must be enabled first */
876 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
877 HCI_INIT_TIMEOUT);
878 if (err)
879 break;
880 }
881
882 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
883 HCI_INIT_TIMEOUT);
884 break;
885
886 case HCISETSCAN:
887 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
888 HCI_INIT_TIMEOUT);
889 break;
890
891 case HCISETLINKPOL:
892 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
893 HCI_INIT_TIMEOUT);
894 break;
895
896 case HCISETLINKMODE:
897 hdev->link_mode = ((__u16) dr.dev_opt) &
898 (HCI_LM_MASTER | HCI_LM_ACCEPT);
899 break;
900
901 case HCISETPTYPE:
902 hdev->pkt_type = (__u16) dr.dev_opt;
903 break;
904
905 case HCISETACLMTU:
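/* dev_opt packs two 16-bit values for the MTU ioctls: the first
 * halfword is the packet count and the second is the MTU. */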
906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
908 break;
909
910 case HCISETSCOMTU:
911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
913 break;
914
915 default:
916 err = -EINVAL;
917 break;
918 }
919
920 hci_dev_put(hdev);
921 return err;
922 }
923
924 int hci_get_dev_list(void __user *arg)
925 {
926 struct hci_dev *hdev;
927 struct hci_dev_list_req *dl;
928 struct hci_dev_req *dr;
929 int n = 0, size, err;
930 __u16 dev_num;
931
932 if (get_user(dev_num, (__u16 __user *) arg))
933 return -EFAULT;
934
935 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
936 return -EINVAL;
937
938 size = sizeof(*dl) + dev_num * sizeof(*dr);
939
940 dl = kzalloc(size, GFP_KERNEL);
941 if (!dl)
942 return -ENOMEM;
943
944 dr = dl->dev_req;
945
946 read_lock(&hci_dev_list_lock);
947 list_for_each_entry(hdev, &hci_dev_list, list) {
948 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
949 cancel_delayed_work(&hdev->power_off);
950
951 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
952 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
953
954 (dr + n)->dev_id = hdev->id;
955 (dr + n)->dev_opt = hdev->flags;
956
957 if (++n >= dev_num)
958 break;
959 }
960 read_unlock(&hci_dev_list_lock);
961
962 dl->dev_num = n;
963 size = sizeof(*dl) + n * sizeof(*dr);
964
965 err = copy_to_user(arg, dl, size);
966 kfree(dl);
967
968 return err ? -EFAULT : 0;
969 }
970
971 int hci_get_dev_info(void __user *arg)
972 {
973 struct hci_dev *hdev;
974 struct hci_dev_info di;
975 int err = 0;
976
977 if (copy_from_user(&di, arg, sizeof(di)))
978 return -EFAULT;
979
980 hdev = hci_dev_get(di.dev_id);
981 if (!hdev)
982 return -ENODEV;
983
984 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
985 cancel_delayed_work_sync(&hdev->power_off);
986
987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
988 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
989
990 strcpy(di.name, hdev->name);
991 di.bdaddr = hdev->bdaddr;
992 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
993 di.flags = hdev->flags;
994 di.pkt_type = hdev->pkt_type;
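/* A controller without BR/EDR support has no ACL/SCO buffer info,
 * so report the LE buffer size in the ACL fields instead; this is
 * the LE MTU reporting fix named in the subject line. */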
995 if (lmp_bredr_capable(hdev)) {
996 di.acl_mtu = hdev->acl_mtu;
997 di.acl_pkts = hdev->acl_pkts;
998 di.sco_mtu = hdev->sco_mtu;
999 di.sco_pkts = hdev->sco_pkts;
1000 } else {
1001 di.acl_mtu = hdev->le_mtu;
1002 di.acl_pkts = hdev->le_pkts;
1003 di.sco_mtu = 0;
1004 di.sco_pkts = 0;
1005 }
1006 di.link_policy = hdev->link_policy;
1007 di.link_mode = hdev->link_mode;
1008
1009 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1010 memcpy(&di.features, &hdev->features, sizeof(di.features));
1011
1012 if (copy_to_user(arg, &di, sizeof(di)))
1013 err = -EFAULT;
1014
1015 hci_dev_put(hdev);
1016
1017 return err;
1018 }
1019
1020 /* ---- Interface to HCI drivers ---- */
1021
1022 static int hci_rfkill_set_block(void *data, bool blocked)
1023 {
1024 struct hci_dev *hdev = data;
1025
1026 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1027
1028 if (!blocked)
1029 return 0;
1030
1031 hci_dev_do_close(hdev);
1032
1033 return 0;
1034 }
1035
1036 static const struct rfkill_ops hci_rfkill_ops = {
1037 .set_block = hci_rfkill_set_block,
1038 };
1039
1040 static void hci_power_on(struct work_struct *work)
1041 {
1042 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1043
1044 BT_DBG("%s", hdev->name);
1045
1046 if (hci_dev_open(hdev->id) < 0)
1047 return;
1048
1049 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1050 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1051
1052 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1053 mgmt_index_added(hdev);
1054 }
1055
1056 static void hci_power_off(struct work_struct *work)
1057 {
1058 struct hci_dev *hdev = container_of(work, struct hci_dev,
1059 power_off.work);
1060
1061 BT_DBG("%s", hdev->name);
1062
1063 hci_dev_do_close(hdev);
1064 }
1065
1066 static void hci_discov_off(struct work_struct *work)
1067 {
1068 struct hci_dev *hdev;
1069 u8 scan = SCAN_PAGE;
1070
1071 hdev = container_of(work, struct hci_dev, discov_off.work);
1072
1073 BT_DBG("%s", hdev->name);
1074
1075 hci_dev_lock(hdev);
1076
1077 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1078
1079 hdev->discov_timeout = 0;
1080
1081 hci_dev_unlock(hdev);
1082 }
1083
1084 int hci_uuids_clear(struct hci_dev *hdev)
1085 {
1086 struct list_head *p, *n;
1087
1088 list_for_each_safe(p, n, &hdev->uuids) {
1089 struct bt_uuid *uuid;
1090
1091 uuid = list_entry(p, struct bt_uuid, list);
1092
1093 list_del(p);
1094 kfree(uuid);
1095 }
1096
1097 return 0;
1098 }
1099
1100 int hci_link_keys_clear(struct hci_dev *hdev)
1101 {
1102 struct list_head *p, *n;
1103
1104 list_for_each_safe(p, n, &hdev->link_keys) {
1105 struct link_key *key;
1106
1107 key = list_entry(p, struct link_key, list);
1108
1109 list_del(p);
1110 kfree(key);
1111 }
1112
1113 return 0;
1114 }
1115
1116 int hci_smp_ltks_clear(struct hci_dev *hdev)
1117 {
1118 struct smp_ltk *k, *tmp;
1119
1120 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1121 list_del(&k->list);
1122 kfree(k);
1123 }
1124
1125 return 0;
1126 }
1127
1128 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1129 {
1130 struct link_key *k;
1131
1132 list_for_each_entry(k, &hdev->link_keys, list)
1133 if (bacmp(bdaddr, &k->bdaddr) == 0)
1134 return k;
1135
1136 return NULL;
1137 }
1138
1139 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1140 u8 key_type, u8 old_key_type)
1141 {
1142 /* Legacy key */
1143 if (key_type < 0x03)
1144 return true;
1145
1146 /* Debug keys are insecure so don't store them persistently */
1147 if (key_type == HCI_LK_DEBUG_COMBINATION)
1148 return false;
1149
1150 /* Changed combination key and there's no previous one */
1151 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1152 return false;
1153
1154 /* Security mode 3 case */
1155 if (!conn)
1156 return true;
1157
1158 /* Neither local nor remote side had no-bonding as requirement */
1159 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1160 return true;
1161
1162 /* Local side had dedicated bonding as requirement */
1163 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1164 return true;
1165
1166 /* Remote side had dedicated bonding as requirement */
1167 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1168 return true;
1169
1170 /* If none of the above criteria match, then don't store the key
1171 * persistently */
1172 return false;
1173 }
1174
1175 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1176 {
1177 struct smp_ltk *k;
1178
1179 list_for_each_entry(k, &hdev->long_term_keys, list) {
1180 if (k->ediv != ediv ||
1181 memcmp(rand, k->rand, sizeof(k->rand)))
1182 continue;
1183
1184 return k;
1185 }
1186
1187 return NULL;
1188 }
1189
1190 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1191 u8 addr_type)
1192 {
1193 struct smp_ltk *k;
1194
1195 list_for_each_entry(k, &hdev->long_term_keys, list)
1196 if (addr_type == k->bdaddr_type &&
1197 bacmp(bdaddr, &k->bdaddr) == 0)
1198 return k;
1199
1200 return NULL;
1201 }
1202
1203 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1204 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1205 {
1206 struct link_key *key, *old_key;
1207 u8 old_key_type;
1208 bool persistent;
1209
1210 old_key = hci_find_link_key(hdev, bdaddr);
1211 if (old_key) {
1212 old_key_type = old_key->type;
1213 key = old_key;
1214 } else {
1215 old_key_type = conn ? conn->key_type : 0xff;
1216 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1217 if (!key)
1218 return -ENOMEM;
1219 list_add(&key->list, &hdev->link_keys);
1220 }
1221
1222 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1223
1224 /* Some buggy controller combinations generate a changed
1225 * combination key for legacy pairing even when there's no
1226 * previous key */
1227 if (type == HCI_LK_CHANGED_COMBINATION &&
1228 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1229 type = HCI_LK_COMBINATION;
1230 if (conn)
1231 conn->key_type = type;
1232 }
1233
1234 bacpy(&key->bdaddr, bdaddr);
1235 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1236 key->pin_len = pin_len;
1237
1238 if (type == HCI_LK_CHANGED_COMBINATION)
1239 key->type = old_key_type;
1240 else
1241 key->type = type;
1242
1243 if (!new_key)
1244 return 0;
1245
1246 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1247
1248 mgmt_new_link_key(hdev, key, persistent);
1249
1250 if (conn)
1251 conn->flush_key = !persistent;
1252
1253 return 0;
1254 }
1255
1256 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1257 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1258 ediv, u8 rand[8])
1259 {
1260 struct smp_ltk *key, *old_key;
1261
1262 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1263 return 0;
1264
1265 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1266 if (old_key)
1267 key = old_key;
1268 else {
1269 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1270 if (!key)
1271 return -ENOMEM;
1272 list_add(&key->list, &hdev->long_term_keys);
1273 }
1274
1275 bacpy(&key->bdaddr, bdaddr);
1276 key->bdaddr_type = addr_type;
1277 memcpy(key->val, tk, sizeof(key->val));
1278 key->authenticated = authenticated;
1279 key->ediv = ediv;
1280 key->enc_size = enc_size;
1281 key->type = type;
1282 memcpy(key->rand, rand, sizeof(key->rand));
1283
1284 if (!new_key)
1285 return 0;
1286
1287 if (type & HCI_SMP_LTK)
1288 mgmt_new_ltk(hdev, key, 1);
1289
1290 return 0;
1291 }
1292
1293 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1294 {
1295 struct link_key *key;
1296
1297 key = hci_find_link_key(hdev, bdaddr);
1298 if (!key)
1299 return -ENOENT;
1300
1301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1302
1303 list_del(&key->list);
1304 kfree(key);
1305
1306 return 0;
1307 }
1308
1309 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1310 {
1311 struct smp_ltk *k, *tmp;
1312
1313 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1314 if (bacmp(bdaddr, &k->bdaddr))
1315 continue;
1316
1317 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1318
1319 list_del(&k->list);
1320 kfree(k);
1321 }
1322
1323 return 0;
1324 }
1325
1326 /* HCI command timer function */
1327 static void hci_cmd_timeout(unsigned long arg)
1328 {
1329 struct hci_dev *hdev = (void *) arg;
1330
1331 if (hdev->sent_cmd) {
1332 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1333 u16 opcode = __le16_to_cpu(sent->opcode);
1334
1335 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1336 } else {
1337 BT_ERR("%s command tx timeout", hdev->name);
1338 }
1339
1340 atomic_set(&hdev->cmd_cnt, 1);
1341 queue_work(hdev->workqueue, &hdev->cmd_work);
1342 }
1343
1344 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1345 bdaddr_t *bdaddr)
1346 {
1347 struct oob_data *data;
1348
1349 list_for_each_entry(data, &hdev->remote_oob_data, list)
1350 if (bacmp(bdaddr, &data->bdaddr) == 0)
1351 return data;
1352
1353 return NULL;
1354 }
1355
1356 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357 {
1358 struct oob_data *data;
1359
1360 data = hci_find_remote_oob_data(hdev, bdaddr);
1361 if (!data)
1362 return -ENOENT;
1363
1364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1365
1366 list_del(&data->list);
1367 kfree(data);
1368
1369 return 0;
1370 }
1371
1372 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1373 {
1374 struct oob_data *data, *n;
1375
1376 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1377 list_del(&data->list);
1378 kfree(data);
1379 }
1380
1381 return 0;
1382 }
1383
1384 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1385 u8 *randomizer)
1386 {
1387 struct oob_data *data;
1388
1389 data = hci_find_remote_oob_data(hdev, bdaddr);
1390
1391 if (!data) {
1392 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1393 if (!data)
1394 return -ENOMEM;
1395
1396 bacpy(&data->bdaddr, bdaddr);
1397 list_add(&data->list, &hdev->remote_oob_data);
1398 }
1399
1400 memcpy(data->hash, hash, sizeof(data->hash));
1401 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1402
1403 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1404
1405 return 0;
1406 }
1407
1408 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409 {
1410 struct bdaddr_list *b;
1411
1412 list_for_each_entry(b, &hdev->blacklist, list)
1413 if (bacmp(bdaddr, &b->bdaddr) == 0)
1414 return b;
1415
1416 return NULL;
1417 }
1418
1419 int hci_blacklist_clear(struct hci_dev *hdev)
1420 {
1421 struct list_head *p, *n;
1422
1423 list_for_each_safe(p, n, &hdev->blacklist) {
1424 struct bdaddr_list *b;
1425
1426 b = list_entry(p, struct bdaddr_list, list);
1427
1428 list_del(p);
1429 kfree(b);
1430 }
1431
1432 return 0;
1433 }
1434
1435 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1436 {
1437 struct bdaddr_list *entry;
1438
1439 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1440 return -EBADF;
1441
1442 if (hci_blacklist_lookup(hdev, bdaddr))
1443 return -EEXIST;
1444
1445 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1446 if (!entry)
1447 return -ENOMEM;
1448
1449 bacpy(&entry->bdaddr, bdaddr);
1450
1451 list_add(&entry->list, &hdev->blacklist);
1452
1453 return mgmt_device_blocked(hdev, bdaddr, type);
1454 }
1455
1456 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1457 {
1458 struct bdaddr_list *entry;
1459
1460 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1461 return hci_blacklist_clear(hdev);
1462
1463 entry = hci_blacklist_lookup(hdev, bdaddr);
1464 if (!entry)
1465 return -ENOENT;
1466
1467 list_del(&entry->list);
1468 kfree(entry);
1469
1470 return mgmt_device_unblocked(hdev, bdaddr, type);
1471 }
1472
1473 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1474 {
1475 struct le_scan_params *param = (struct le_scan_params *) opt;
1476 struct hci_cp_le_set_scan_param cp;
1477
1478 memset(&cp, 0, sizeof(cp));
1479 cp.type = param->type;
1480 cp.interval = cpu_to_le16(param->interval);
1481 cp.window = cpu_to_le16(param->window);
1482
1483 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1484 }
1485
1486 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1487 {
1488 struct hci_cp_le_set_scan_enable cp;
1489
1490 memset(&cp, 0, sizeof(cp));
1491 cp.enable = 1;
1492 cp.filter_dup = 1;
1493
1494 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1495 }
1496
1497 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1498 u16 window, int timeout)
1499 {
1500 long timeo = msecs_to_jiffies(3000);
1501 struct le_scan_params param;
1502 int err;
1503
1504 BT_DBG("%s", hdev->name);
1505
1506 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1507 return -EINPROGRESS;
1508
1509 param.type = type;
1510 param.interval = interval;
1511 param.window = window;
1512
1513 hci_req_lock(hdev);
1514
1515 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1516 timeo);
1517 if (!err)
1518 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1519
1520 hci_req_unlock(hdev);
1521
1522 if (err < 0)
1523 return err;
1524
1525 schedule_delayed_work(&hdev->le_scan_disable,
1526 msecs_to_jiffies(timeout));
1527
1528 return 0;
1529 }
1530
1531 int hci_cancel_le_scan(struct hci_dev *hdev)
1532 {
1533 BT_DBG("%s", hdev->name);
1534
1535 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1536 return -EALREADY;
1537
1538 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1539 struct hci_cp_le_set_scan_enable cp;
1540
1541 /* Send HCI command to disable LE Scan */
1542 memset(&cp, 0, sizeof(cp));
1543 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1544 }
1545
1546 return 0;
1547 }
1548
1549 static void le_scan_disable_work(struct work_struct *work)
1550 {
1551 struct hci_dev *hdev = container_of(work, struct hci_dev,
1552 le_scan_disable.work);
1553 struct hci_cp_le_set_scan_enable cp;
1554
1555 BT_DBG("%s", hdev->name);
1556
1557 memset(&cp, 0, sizeof(cp));
1558
1559 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1560 }
1561
1562 static void le_scan_work(struct work_struct *work)
1563 {
1564 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1565 struct le_scan_params *param = &hdev->le_scan_params;
1566
1567 BT_DBG("%s", hdev->name);
1568
1569 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1570 param->timeout);
1571 }
1572
1573 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1574 int timeout)
1575 {
1576 struct le_scan_params *param = &hdev->le_scan_params;
1577
1578 BT_DBG("%s", hdev->name);
1579
1580 if (work_busy(&hdev->le_scan))
1581 return -EINPROGRESS;
1582
1583 param->type = type;
1584 param->interval = interval;
1585 param->window = window;
1586 param->timeout = timeout;
1587
1588 queue_work(system_long_wq, &hdev->le_scan);
1589
1590 return 0;
1591 }
1592
1593 /* Alloc HCI device */
1594 struct hci_dev *hci_alloc_dev(void)
1595 {
1596 struct hci_dev *hdev;
1597
1598 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1599 if (!hdev)
1600 return NULL;
1601
1602 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1603 hdev->esco_type = (ESCO_HV1);
1604 hdev->link_mode = (HCI_LM_ACCEPT);
1605 hdev->io_capability = 0x03; /* No Input No Output */
1606
1607 hdev->sniff_max_interval = 800;
1608 hdev->sniff_min_interval = 80;
1609
1610 mutex_init(&hdev->lock);
1611 mutex_init(&hdev->req_lock);
1612
1613 INIT_LIST_HEAD(&hdev->mgmt_pending);
1614 INIT_LIST_HEAD(&hdev->blacklist);
1615 INIT_LIST_HEAD(&hdev->uuids);
1616 INIT_LIST_HEAD(&hdev->link_keys);
1617 INIT_LIST_HEAD(&hdev->long_term_keys);
1618 INIT_LIST_HEAD(&hdev->remote_oob_data);
1619 INIT_LIST_HEAD(&hdev->conn_hash.list);
1620
1621 INIT_WORK(&hdev->rx_work, hci_rx_work);
1622 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1623 INIT_WORK(&hdev->tx_work, hci_tx_work);
1624 INIT_WORK(&hdev->power_on, hci_power_on);
1625 INIT_WORK(&hdev->le_scan, le_scan_work);
1626
1627 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1628 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1629 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1630
1631 skb_queue_head_init(&hdev->driver_init);
1632 skb_queue_head_init(&hdev->rx_q);
1633 skb_queue_head_init(&hdev->cmd_q);
1634 skb_queue_head_init(&hdev->raw_q);
1635
1636 init_waitqueue_head(&hdev->req_wait_q);
1637
1638 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1639
1640 hci_init_sysfs(hdev);
1641 discovery_init(hdev);
1642
1643 return hdev;
1644 }
1645 EXPORT_SYMBOL(hci_alloc_dev);
1646
1647 /* Free HCI device */
1648 void hci_free_dev(struct hci_dev *hdev)
1649 {
1650 skb_queue_purge(&hdev->driver_init);
1651
1652 /* will free via device release */
1653 put_device(&hdev->dev);
1654 }
1655 EXPORT_SYMBOL(hci_free_dev);
1656
1657 /* Register HCI device */
1658 int hci_register_dev(struct hci_dev *hdev)
1659 {
1660 int id, error;
1661
1662 if (!hdev->open || !hdev->close)
1663 return -EINVAL;
1664
1665 /* Do not allow HCI_AMP devices to register at index 0,
1666 * so the index can be used as the AMP controller ID.
1667 */
1668 switch (hdev->dev_type) {
1669 case HCI_BREDR:
1670 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1671 break;
1672 case HCI_AMP:
1673 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1674 break;
1675 default:
1676 return -EINVAL;
1677 }
1678
1679 if (id < 0)
1680 return id;
1681
1682 sprintf(hdev->name, "hci%d", id);
1683 hdev->id = id;
1684
1685 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1686
1687 write_lock(&hci_dev_list_lock);
1688 list_add(&hdev->list, &hci_dev_list);
1689 write_unlock(&hci_dev_list_lock);
1690
1691 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1692 WQ_MEM_RECLAIM, 1);
1693 if (!hdev->workqueue) {
1694 error = -ENOMEM;
1695 goto err;
1696 }
1697
1698 error = hci_add_sysfs(hdev);
1699 if (error < 0)
1700 goto err_wqueue;
1701
1702 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1703 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1704 hdev);
1705 if (hdev->rfkill) {
1706 if (rfkill_register(hdev->rfkill) < 0) {
1707 rfkill_destroy(hdev->rfkill);
1708 hdev->rfkill = NULL;
1709 }
1710 }
1711
1712 set_bit(HCI_SETUP, &hdev->dev_flags);
1713
1714 if (hdev->dev_type != HCI_AMP)
1715 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1716
1717 schedule_work(&hdev->power_on);
1718
1719 hci_notify(hdev, HCI_DEV_REG);
1720 hci_dev_hold(hdev);
1721
1722 return id;
1723
1724 err_wqueue:
1725 destroy_workqueue(hdev->workqueue);
1726 err:
1727 ida_simple_remove(&hci_index_ida, hdev->id);
1728 write_lock(&hci_dev_list_lock);
1729 list_del(&hdev->list);
1730 write_unlock(&hci_dev_list_lock);
1731
1732 return error;
1733 }
1734 EXPORT_SYMBOL(hci_register_dev);
1735
1736 /* Unregister HCI device */
1737 void hci_unregister_dev(struct hci_dev *hdev)
1738 {
1739 int i, id;
1740
1741 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1742
1743 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1744
1745 id = hdev->id;
1746
1747 write_lock(&hci_dev_list_lock);
1748 list_del(&hdev->list);
1749 write_unlock(&hci_dev_list_lock);
1750
1751 hci_dev_do_close(hdev);
1752
1753 for (i = 0; i < NUM_REASSEMBLY; i++)
1754 kfree_skb(hdev->reassembly[i]);
1755
1756 if (!test_bit(HCI_INIT, &hdev->flags) &&
1757 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1758 hci_dev_lock(hdev);
1759 mgmt_index_removed(hdev);
1760 hci_dev_unlock(hdev);
1761 }
1762
1763 /* mgmt_index_removed should take care of emptying the
1764 * pending list */
1765 BUG_ON(!list_empty(&hdev->mgmt_pending));
1766
1767 hci_notify(hdev, HCI_DEV_UNREG);
1768
1769 if (hdev->rfkill) {
1770 rfkill_unregister(hdev->rfkill);
1771 rfkill_destroy(hdev->rfkill);
1772 }
1773
1774 hci_del_sysfs(hdev);
1775
1776 destroy_workqueue(hdev->workqueue);
1777
1778 hci_dev_lock(hdev);
1779 hci_blacklist_clear(hdev);
1780 hci_uuids_clear(hdev);
1781 hci_link_keys_clear(hdev);
1782 hci_smp_ltks_clear(hdev);
1783 hci_remote_oob_data_clear(hdev);
1784 hci_dev_unlock(hdev);
1785
1786 hci_dev_put(hdev);
1787
1788 ida_simple_remove(&hci_index_ida, id);
1789 }
1790 EXPORT_SYMBOL(hci_unregister_dev);
1791
1792 /* Suspend HCI device */
1793 int hci_suspend_dev(struct hci_dev *hdev)
1794 {
1795 hci_notify(hdev, HCI_DEV_SUSPEND);
1796 return 0;
1797 }
1798 EXPORT_SYMBOL(hci_suspend_dev);
1799
1800 /* Resume HCI device */
1801 int hci_resume_dev(struct hci_dev *hdev)
1802 {
1803 hci_notify(hdev, HCI_DEV_RESUME);
1804 return 0;
1805 }
1806 EXPORT_SYMBOL(hci_resume_dev);
1807
1808 /* Receive frame from HCI drivers */
1809 int hci_recv_frame(struct sk_buff *skb)
1810 {
1811 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1812 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1813 && !test_bit(HCI_INIT, &hdev->flags))) {
1814 kfree_skb(skb);
1815 return -ENXIO;
1816 }
1817
1818 /* Incoming skb */
1819 bt_cb(skb)->incoming = 1;
1820
1821 /* Time stamp */
1822 __net_timestamp(skb);
1823
1824 skb_queue_tail(&hdev->rx_q, skb);
1825 queue_work(hdev->workqueue, &hdev->rx_work);
1826
1827 return 0;
1828 }
1829 EXPORT_SYMBOL(hci_recv_frame);
1830
1831 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1832 int count, __u8 index)
1833 {
1834 int len = 0;
1835 int hlen = 0;
1836 int remain = count;
1837 struct sk_buff *skb;
1838 struct bt_skb_cb *scb;
1839
1840 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1841 index >= NUM_REASSEMBLY)
1842 return -EILSEQ;
1843
1844 skb = hdev->reassembly[index];
1845
1846 if (!skb) {
1847 switch (type) {
1848 case HCI_ACLDATA_PKT:
1849 len = HCI_MAX_FRAME_SIZE;
1850 hlen = HCI_ACL_HDR_SIZE;
1851 break;
1852 case HCI_EVENT_PKT:
1853 len = HCI_MAX_EVENT_SIZE;
1854 hlen = HCI_EVENT_HDR_SIZE;
1855 break;
1856 case HCI_SCODATA_PKT:
1857 len = HCI_MAX_SCO_SIZE;
1858 hlen = HCI_SCO_HDR_SIZE;
1859 break;
1860 }
1861
1862 skb = bt_skb_alloc(len, GFP_ATOMIC);
1863 if (!skb)
1864 return -ENOMEM;
1865
1866 scb = (void *) skb->cb;
1867 scb->expect = hlen;
1868 scb->pkt_type = type;
1869
1870 skb->dev = (void *) hdev;
1871 hdev->reassembly[index] = skb;
1872 }
1873
1874 while (count) {
1875 scb = (void *) skb->cb;
1876 len = min_t(uint, scb->expect, count);
1877
1878 memcpy(skb_put(skb, len), data, len);
1879
1880 count -= len;
1881 data += len;
1882 scb->expect -= len;
1883 remain = count;
1884
1885 switch (type) {
1886 case HCI_EVENT_PKT:
1887 if (skb->len == HCI_EVENT_HDR_SIZE) {
1888 struct hci_event_hdr *h = hci_event_hdr(skb);
1889 scb->expect = h->plen;
1890
1891 if (skb_tailroom(skb) < scb->expect) {
1892 kfree_skb(skb);
1893 hdev->reassembly[index] = NULL;
1894 return -ENOMEM;
1895 }
1896 }
1897 break;
1898
1899 case HCI_ACLDATA_PKT:
1900 if (skb->len == HCI_ACL_HDR_SIZE) {
1901 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1902 scb->expect = __le16_to_cpu(h->dlen);
1903
1904 if (skb_tailroom(skb) < scb->expect) {
1905 kfree_skb(skb);
1906 hdev->reassembly[index] = NULL;
1907 return -ENOMEM;
1908 }
1909 }
1910 break;
1911
1912 case HCI_SCODATA_PKT:
1913 if (skb->len == HCI_SCO_HDR_SIZE) {
1914 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1915 scb->expect = h->dlen;
1916
1917 if (skb_tailroom(skb) < scb->expect) {
1918 kfree_skb(skb);
1919 hdev->reassembly[index] = NULL;
1920 return -ENOMEM;
1921 }
1922 }
1923 break;
1924 }
1925
1926 if (scb->expect == 0) {
1927 /* Complete frame */
1928
1929 bt_cb(skb)->pkt_type = type;
1930 hci_recv_frame(skb);
1931
1932 hdev->reassembly[index] = NULL;
1933 return remain;
1934 }
1935 }
1936
1937 return remain;
1938 }
1939
1940 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1941 {
1942 int rem = 0;
1943
1944 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1945 return -EILSEQ;
1946
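/* Per-type reassembly: fragments of ACL/SCO/event packets land in
 * slot type - 1 (1..3), leaving slot 0 for the stream parser below. */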
1947 while (count) {
1948 rem = hci_reassembly(hdev, type, data, count, type - 1);
1949 if (rem < 0)
1950 return rem;
1951
1952 data += (count - rem);
1953 count = rem;
1954 }
1955
1956 return rem;
1957 }
1958 EXPORT_SYMBOL(hci_recv_fragment);
1959
1960 #define STREAM_REASSEMBLY 0
1961
1962 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1963 {
1964 int type;
1965 int rem = 0;
1966
1967 while (count) {
1968 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1969
1970 if (!skb) {
1971 struct { char type; } *pkt;
1972
1973 /* Start of the frame */
1974 pkt = data;
1975 type = pkt->type;
1976
1977 data++;
1978 count--;
1979 } else
1980 type = bt_cb(skb)->pkt_type;
1981
1982 rem = hci_reassembly(hdev, type, data, count,
1983 STREAM_REASSEMBLY);
1984 if (rem < 0)
1985 return rem;
1986
1987 data += (count - rem);
1988 count = rem;
1989 }
1990
1991 return rem;
1992 }
1993 EXPORT_SYMBOL(hci_recv_stream_fragment);
1994
1995 /* ---- Interface to upper protocols ---- */
1996
1997 int hci_register_cb(struct hci_cb *cb)
1998 {
1999 BT_DBG("%p name %s", cb, cb->name);
2000
2001 write_lock(&hci_cb_list_lock);
2002 list_add(&cb->list, &hci_cb_list);
2003 write_unlock(&hci_cb_list_lock);
2004
2005 return 0;
2006 }
2007 EXPORT_SYMBOL(hci_register_cb);
2008
2009 int hci_unregister_cb(struct hci_cb *cb)
2010 {
2011 BT_DBG("%p name %s", cb, cb->name);
2012
2013 write_lock(&hci_cb_list_lock);
2014 list_del(&cb->list);
2015 write_unlock(&hci_cb_list_lock);
2016
2017 return 0;
2018 }
2019 EXPORT_SYMBOL(hci_unregister_cb);
2020
2021 static int hci_send_frame(struct sk_buff *skb)
2022 {
2023 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2024
2025 if (!hdev) {
2026 kfree_skb(skb);
2027 return -ENODEV;
2028 }
2029
2030 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2031
2032 /* Time stamp */
2033 __net_timestamp(skb);
2034
2035 /* Send copy to monitor */
2036 hci_send_to_monitor(hdev, skb);
2037
2038 if (atomic_read(&hdev->promisc)) {
2039 /* Send copy to the sockets */
2040 hci_send_to_sock(hdev, skb);
2041 }
2042
2043 /* Get rid of skb owner, prior to sending to the driver. */
2044 skb_orphan(skb);
2045
2046 return hdev->send(skb);
2047 }
2048
2049 /* Send HCI command */
2050 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2051 {
2052 int len = HCI_COMMAND_HDR_SIZE + plen;
2053 struct hci_command_hdr *hdr;
2054 struct sk_buff *skb;
2055
2056 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2057
2058 skb = bt_skb_alloc(len, GFP_ATOMIC);
2059 if (!skb) {
2060 BT_ERR("%s no memory for command", hdev->name);
2061 return -ENOMEM;
2062 }
2063
2064 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2065 hdr->opcode = cpu_to_le16(opcode);
2066 hdr->plen = plen;
2067
2068 if (plen)
2069 memcpy(skb_put(skb, plen), param, plen);
2070
2071 BT_DBG("skb len %d", skb->len);
2072
2073 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2074 skb->dev = (void *) hdev;
2075
2076 if (test_bit(HCI_INIT, &hdev->flags))
2077 hdev->init_last_cmd = opcode;
2078
2079 skb_queue_tail(&hdev->cmd_q, skb);
2080 queue_work(hdev->workqueue, &hdev->cmd_work);
2081
2082 return 0;
2083 }
2084
2085 /* Get data from the previously sent command */
2086 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2087 {
2088 struct hci_command_hdr *hdr;
2089
2090 if (!hdev->sent_cmd)
2091 return NULL;
2092
2093 hdr = (void *) hdev->sent_cmd->data;
2094
2095 if (hdr->opcode != cpu_to_le16(opcode))
2096 return NULL;
2097
2098 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2099
2100 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2101 }
2102
2103 /* Send ACL data */
2104 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2105 {
2106 struct hci_acl_hdr *hdr;
2107 int len = skb->len;
2108
2109 skb_push(skb, HCI_ACL_HDR_SIZE);
2110 skb_reset_transport_header(skb);
2111 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2112 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2113 hdr->dlen = cpu_to_le16(len);
2114 }
2115
2116 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2117 struct sk_buff *skb, __u16 flags)
2118 {
2119 struct hci_conn *conn = chan->conn;
2120 struct hci_dev *hdev = conn->hdev;
2121 struct sk_buff *list;
2122
2123 skb->len = skb_headlen(skb);
2124 skb->data_len = 0;
2125
2126 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2127
2128 switch (hdev->dev_type) {
2129 case HCI_BREDR:
2130 hci_add_acl_hdr(skb, conn->handle, flags);
2131 break;
2132 case HCI_AMP:
2133 hci_add_acl_hdr(skb, chan->handle, flags);
2134 break;
2135 default:
2136 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2137 return;
2138 }
2139
2140 list = skb_shinfo(skb)->frag_list;
2141 if (!list) {
2142 /* Non fragmented */
2143 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2144
2145 skb_queue_tail(queue, skb);
2146 } else {
2147 /* Fragmented */
2148 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2149
2150 skb_shinfo(skb)->frag_list = NULL;
2151
2152 /* Queue all fragments atomically */
2153 spin_lock(&queue->lock);
2154
2155 __skb_queue_tail(queue, skb);
2156
2157 flags &= ~ACL_START;
2158 flags |= ACL_CONT;
2159 do {
2160 skb = list; list = list->next;
2161
2162 skb->dev = (void *) hdev;
2163 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2164 hci_add_acl_hdr(skb, conn->handle, flags);
2165
2166 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2167
2168 __skb_queue_tail(queue, skb);
2169 } while (list);
2170
2171 spin_unlock(&queue->lock);
2172 }
2173 }
2174
2175 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2176 {
2177 struct hci_dev *hdev = chan->conn->hdev;
2178
2179 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2180
2181 skb->dev = (void *) hdev;
2182
2183 hci_queue_acl(chan, &chan->data_q, skb, flags);
2184
2185 queue_work(hdev->workqueue, &hdev->tx_work);
2186 }
2187
2188 /* Send SCO data */
2189 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2190 {
2191 struct hci_dev *hdev = conn->hdev;
2192 struct hci_sco_hdr hdr;
2193
2194 BT_DBG("%s len %d", hdev->name, skb->len);
2195
2196 hdr.handle = cpu_to_le16(conn->handle);
2197 hdr.dlen = skb->len;
2198
2199 skb_push(skb, HCI_SCO_HDR_SIZE);
2200 skb_reset_transport_header(skb);
2201 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2202
2203 skb->dev = (void *) hdev;
2204 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2205
2206 skb_queue_tail(&conn->data_q, skb);
2207 queue_work(hdev->workqueue, &hdev->tx_work);
2208 }
2209
2210 /* ---- HCI TX task (outgoing data) ---- */
2211
2212 /* HCI Connection scheduler */
2213 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2214 int *quote)
2215 {
2216 struct hci_conn_hash *h = &hdev->conn_hash;
2217 struct hci_conn *conn = NULL, *c;
2218 unsigned int num = 0, min = ~0;
2219
2220 /* We don't have to lock device here. Connections are always
2221 * added and removed with TX task disabled. */
2222
2223 rcu_read_lock();
2224
2225 list_for_each_entry_rcu(c, &h->list, list) {
2226 if (c->type != type || skb_queue_empty(&c->data_q))
2227 continue;
2228
2229 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2230 continue;
2231
2232 num++;
2233
2234 if (c->sent < min) {
2235 min = c->sent;
2236 conn = c;
2237 }
2238
2239 if (hci_conn_num(hdev, type) == num)
2240 break;
2241 }
2242
2243 rcu_read_unlock();
2244
2245 if (conn) {
2246 int cnt, q;
2247
2248 switch (conn->type) {
2249 case ACL_LINK:
2250 cnt = hdev->acl_cnt;
2251 break;
2252 case SCO_LINK:
2253 case ESCO_LINK:
2254 cnt = hdev->sco_cnt;
2255 break;
2256 case LE_LINK:
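/* Controllers with no dedicated LE buffers (le_mtu == 0) send LE
 * traffic out of the ACL buffer pool. */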
2257 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2258 break;
2259 default:
2260 cnt = 0;
2261 BT_ERR("Unknown link type");
2262 }
2263
2264 q = cnt / num;
2265 *quote = q ? q : 1;
2266 } else
2267 *quote = 0;
2268
2269 BT_DBG("conn %p quote %d", conn, *quote);
2270 return conn;
2271 }
2272
2273 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2274 {
2275 struct hci_conn_hash *h = &hdev->conn_hash;
2276 struct hci_conn *c;
2277
2278 BT_ERR("%s link tx timeout", hdev->name);
2279
2280 rcu_read_lock();
2281
2282 /* Kill stalled connections */
2283 list_for_each_entry_rcu(c, &h->list, list) {
2284 if (c->type == type && c->sent) {
2285 BT_ERR("%s killing stalled connection %pMR",
2286 hdev->name, &c->dst);
2287 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2288 }
2289 }
2290
2291 rcu_read_unlock();
2292 }
2293
2294 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2295 int *quote)
2296 {
2297 struct hci_conn_hash *h = &hdev->conn_hash;
2298 struct hci_chan *chan = NULL;
2299 unsigned int num = 0, min = ~0, cur_prio = 0;
2300 struct hci_conn *conn;
2301 int cnt, q, conn_num = 0;
2302
2303 BT_DBG("%s", hdev->name);
2304
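/* Pick the channel with the highest-priority queued skb; among equal
 * priorities, the connection with the fewest packets in flight wins. */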
2305 rcu_read_lock();
2306
2307 list_for_each_entry_rcu(conn, &h->list, list) {
2308 struct hci_chan *tmp;
2309
2310 if (conn->type != type)
2311 continue;
2312
2313 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2314 continue;
2315
2316 conn_num++;
2317
2318 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2319 struct sk_buff *skb;
2320
2321 if (skb_queue_empty(&tmp->data_q))
2322 continue;
2323
2324 skb = skb_peek(&tmp->data_q);
2325 if (skb->priority < cur_prio)
2326 continue;
2327
2328 if (skb->priority > cur_prio) {
2329 num = 0;
2330 min = ~0;
2331 cur_prio = skb->priority;
2332 }
2333
2334 num++;
2335
2336 if (conn->sent < min) {
2337 min = conn->sent;
2338 chan = tmp;
2339 }
2340 }
2341
2342 if (hci_conn_num(hdev, type) == conn_num)
2343 break;
2344 }
2345
2346 rcu_read_unlock();
2347
2348 if (!chan)
2349 return NULL;
2350
2351 switch (chan->conn->type) {
2352 case ACL_LINK:
2353 cnt = hdev->acl_cnt;
2354 break;
2355 case AMP_LINK:
2356 cnt = hdev->block_cnt;
2357 break;
2358 case SCO_LINK:
2359 case ESCO_LINK:
2360 cnt = hdev->sco_cnt;
2361 break;
2362 case LE_LINK:
2363 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2364 break;
2365 default:
2366 cnt = 0;
2367 BT_ERR("Unknown link type");
2368 }
2369
2370 q = cnt / num;
2371 *quote = q ? q : 1;
2372 BT_DBG("chan %p quote %d", chan, *quote);
2373 return chan;
2374 }
2375
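/* After a scheduling round, promote the head-of-queue priority of channels
 * that did not get to send anything to HCI_PRIO_MAX - 1, while channels
 * that did send merely have their per-round counter cleared. This keeps a
 * steady stream of high-priority traffic from starving lower-priority
 * queues indefinitely.
 */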
2376 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2377 {
2378 struct hci_conn_hash *h = &hdev->conn_hash;
2379 struct hci_conn *conn;
2380 int num = 0;
2381
2382 BT_DBG("%s", hdev->name);
2383
2384 rcu_read_lock();
2385
2386 list_for_each_entry_rcu(conn, &h->list, list) {
2387 struct hci_chan *chan;
2388
2389 if (conn->type != type)
2390 continue;
2391
2392 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2393 continue;
2394
2395 num++;
2396
2397 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2398 struct sk_buff *skb;
2399
2400 if (chan->sent) {
2401 chan->sent = 0;
2402 continue;
2403 }
2404
2405 if (skb_queue_empty(&chan->data_q))
2406 continue;
2407
2408 skb = skb_peek(&chan->data_q);
2409 if (skb->priority >= HCI_PRIO_MAX - 1)
2410 continue;
2411
2412 skb->priority = HCI_PRIO_MAX - 1;
2413
2414 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2415 skb->priority);
2416 }
2417
2418 if (hci_conn_num(hdev, type) == num)
2419 break;
2420 }
2421
2422 rcu_read_unlock();
2423
2424 }
2425
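/* With block-based flow control the controller accounts for data in fixed
 * blocks of hdev->block_len bytes rather than in whole packets. As a
 * purely illustrative example, if block_len were 64 and an skb carried the
 * 4-byte ACL header plus 200 bytes of payload, the frame would cost
 * DIV_ROUND_UP(200, 64) = 4 blocks.
 */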
2426 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2427 {
2428 /* Calculate count of blocks used by this packet */
2429 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2430 }
2431
2432 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2433 {
2434 if (!test_bit(HCI_RAW, &hdev->flags)) {
2435 		/* ACL tx timeout must be longer than the maximum
2436 		 * link supervision timeout (40.9 seconds) */
2437 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2438 HCI_ACL_TX_TIMEOUT))
2439 hci_link_tx_to(hdev, ACL_LINK);
2440 }
2441 }
2442
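/* Packet-based ACL scheduling: drain channel queues in priority order for
 * as long as the controller reports free ACL buffers (hdev->acl_cnt) and
 * the per-channel quota from hci_chan_sent() lasts. If anything was sent,
 * queue priorities are recalculated afterwards so idle channels get
 * promoted.
 */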
2443 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2444 {
2445 unsigned int cnt = hdev->acl_cnt;
2446 struct hci_chan *chan;
2447 struct sk_buff *skb;
2448 int quote;
2449
2450 __check_timeout(hdev, cnt);
2451
2452 while (hdev->acl_cnt &&
2453 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2454 u32 priority = (skb_peek(&chan->data_q))->priority;
2455 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2456 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2457 skb->len, skb->priority);
2458
2459 /* Stop if priority has changed */
2460 if (skb->priority < priority)
2461 break;
2462
2463 skb = skb_dequeue(&chan->data_q);
2464
2465 hci_conn_enter_active_mode(chan->conn,
2466 bt_cb(skb)->force_active);
2467
2468 hci_send_frame(skb);
2469 hdev->acl_last_tx = jiffies;
2470
2471 hdev->acl_cnt--;
2472 chan->sent++;
2473 chan->conn->sent++;
2474 }
2475 }
2476
2477 if (cnt != hdev->acl_cnt)
2478 hci_prio_recalculate(hdev, ACL_LINK);
2479 }
2480
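/* Block-based ACL scheduling, used when the controller operates in
 * HCI_FLOW_CTL_MODE_BLOCK_BASED (typically AMP controllers). Both the
 * per-channel quota and the controller budget are decremented by the
 * number of data blocks each frame occupies rather than by one per packet.
 */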
2481 static void hci_sched_acl_blk(struct hci_dev *hdev)
2482 {
2483 unsigned int cnt = hdev->block_cnt;
2484 struct hci_chan *chan;
2485 struct sk_buff *skb;
2486 int quote;
2487 u8 type;
2488
2489 __check_timeout(hdev, cnt);
2490
2491 BT_DBG("%s", hdev->name);
2492
2493 if (hdev->dev_type == HCI_AMP)
2494 type = AMP_LINK;
2495 else
2496 type = ACL_LINK;
2497
2498 while (hdev->block_cnt > 0 &&
2499 (chan = hci_chan_sent(hdev, type, &quote))) {
2500 u32 priority = (skb_peek(&chan->data_q))->priority;
2501 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2502 int blocks;
2503
2504 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2505 skb->len, skb->priority);
2506
2507 /* Stop if priority has changed */
2508 if (skb->priority < priority)
2509 break;
2510
2511 skb = skb_dequeue(&chan->data_q);
2512
2513 blocks = __get_blocks(hdev, skb);
2514 if (blocks > hdev->block_cnt)
2515 return;
2516
2517 hci_conn_enter_active_mode(chan->conn,
2518 bt_cb(skb)->force_active);
2519
2520 hci_send_frame(skb);
2521 hdev->acl_last_tx = jiffies;
2522
2523 hdev->block_cnt -= blocks;
2524 quote -= blocks;
2525
2526 chan->sent += blocks;
2527 chan->conn->sent += blocks;
2528 }
2529 }
2530
2531 if (cnt != hdev->block_cnt)
2532 hci_prio_recalculate(hdev, type);
2533 }
2534
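/* Dispatch ACL scheduling according to the flow control mode reported by
 * the controller: packet-based for ordinary BR/EDR controllers,
 * block-based for AMP controllers.
 */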
2535 static void hci_sched_acl(struct hci_dev *hdev)
2536 {
2537 BT_DBG("%s", hdev->name);
2538
2539 	/* No ACL links on a BR/EDR controller, nothing to schedule */
2540 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2541 return;
2542
2543 	/* No AMP links on an AMP controller, nothing to schedule */
2544 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2545 return;
2546
2547 switch (hdev->flow_ctl_mode) {
2548 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2549 hci_sched_acl_pkt(hdev);
2550 break;
2551
2552 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2553 hci_sched_acl_blk(hdev);
2554 break;
2555 }
2556 }
2557
2558 /* Schedule SCO */
2559 static void hci_sched_sco(struct hci_dev *hdev)
2560 {
2561 struct hci_conn *conn;
2562 struct sk_buff *skb;
2563 int quote;
2564
2565 BT_DBG("%s", hdev->name);
2566
2567 if (!hci_conn_num(hdev, SCO_LINK))
2568 return;
2569
2570 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2571 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2572 BT_DBG("skb %p len %d", skb, skb->len);
2573 hci_send_frame(skb);
2574
2575 conn->sent++;
2576 if (conn->sent == ~0)
2577 conn->sent = 0;
2578 }
2579 }
2580 }
2581
2582 static void hci_sched_esco(struct hci_dev *hdev)
2583 {
2584 struct hci_conn *conn;
2585 struct sk_buff *skb;
2586 int quote;
2587
2588 BT_DBG("%s", hdev->name);
2589
2590 if (!hci_conn_num(hdev, ESCO_LINK))
2591 return;
2592
2593 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2594 &quote))) {
2595 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2596 BT_DBG("skb %p len %d", skb, skb->len);
2597 hci_send_frame(skb);
2598
2599 conn->sent++;
2600 if (conn->sent == ~0)
2601 conn->sent = 0;
2602 }
2603 }
2604 }
2605
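/* LE scheduling follows the same pattern as packet-based ACL scheduling.
 * If the controller advertises a dedicated LE buffer pool (hdev->le_pkts),
 * LE frames are accounted against hdev->le_cnt; otherwise they share the
 * ACL buffers, which is why the leftover budget is written back to either
 * le_cnt or acl_cnt below.
 */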
2606 static void hci_sched_le(struct hci_dev *hdev)
2607 {
2608 struct hci_chan *chan;
2609 struct sk_buff *skb;
2610 int quote, cnt, tmp;
2611
2612 BT_DBG("%s", hdev->name);
2613
2614 if (!hci_conn_num(hdev, LE_LINK))
2615 return;
2616
2617 if (!test_bit(HCI_RAW, &hdev->flags)) {
2618 		/* LE tx timeout must be longer than the maximum
2619 		 * link supervision timeout (40.9 seconds) */
2620 if (!hdev->le_cnt && hdev->le_pkts &&
2621 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2622 hci_link_tx_to(hdev, LE_LINK);
2623 }
2624
2625 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2626 tmp = cnt;
2627 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2628 u32 priority = (skb_peek(&chan->data_q))->priority;
2629 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2630 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2631 skb->len, skb->priority);
2632
2633 /* Stop if priority has changed */
2634 if (skb->priority < priority)
2635 break;
2636
2637 skb = skb_dequeue(&chan->data_q);
2638
2639 hci_send_frame(skb);
2640 hdev->le_last_tx = jiffies;
2641
2642 cnt--;
2643 chan->sent++;
2644 chan->conn->sent++;
2645 }
2646 }
2647
2648 if (hdev->le_pkts)
2649 hdev->le_cnt = cnt;
2650 else
2651 hdev->acl_cnt = cnt;
2652
2653 if (cnt != tmp)
2654 hci_prio_recalculate(hdev, LE_LINK);
2655 }
2656
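/* TX work: runs on hdev->workqueue whenever new data is queued or the
 * controller returns buffer credits. It schedules ACL, SCO, eSCO and LE
 * traffic in turn and finally flushes any raw (unknown type) packets,
 * for example those queued through a raw HCI socket.
 */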
2657 static void hci_tx_work(struct work_struct *work)
2658 {
2659 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2660 struct sk_buff *skb;
2661
2662 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2663 hdev->sco_cnt, hdev->le_cnt);
2664
2665 /* Schedule queues and send stuff to HCI driver */
2666
2667 hci_sched_acl(hdev);
2668
2669 hci_sched_sco(hdev);
2670
2671 hci_sched_esco(hdev);
2672
2673 hci_sched_le(hdev);
2674
2675 	/* Send any queued raw (unknown type) packets */
2676 while ((skb = skb_dequeue(&hdev->raw_q)))
2677 hci_send_frame(skb);
2678 }
2679
2680 /* ----- HCI RX task (incoming data processing) ----- */
2681
2682 /* ACL data packet */
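/* The 16-bit handle field of the ACL header packs the 12-bit connection
 * handle in its low bits and the Packet Boundary / Broadcast flags in the
 * top bits; the hci_handle() and hci_flags() helpers split them apart
 * before the payload is handed up to L2CAP.
 */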
2683 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2684 {
2685 struct hci_acl_hdr *hdr = (void *) skb->data;
2686 struct hci_conn *conn;
2687 __u16 handle, flags;
2688
2689 skb_pull(skb, HCI_ACL_HDR_SIZE);
2690
2691 handle = __le16_to_cpu(hdr->handle);
2692 flags = hci_flags(handle);
2693 handle = hci_handle(handle);
2694
2695 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2696 handle, flags);
2697
2698 hdev->stat.acl_rx++;
2699
2700 hci_dev_lock(hdev);
2701 conn = hci_conn_hash_lookup_handle(hdev, handle);
2702 hci_dev_unlock(hdev);
2703
2704 if (conn) {
2705 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2706
2707 hci_dev_lock(hdev);
2708 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2709 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2710 mgmt_device_connected(hdev, &conn->dst, conn->type,
2711 conn->dst_type, 0, NULL, 0,
2712 conn->dev_class);
2713 hci_dev_unlock(hdev);
2714
2715 /* Send to upper protocol */
2716 l2cap_recv_acldata(conn, skb, flags);
2717 return;
2718 } else {
2719 BT_ERR("%s ACL packet for unknown connection handle %d",
2720 hdev->name, handle);
2721 }
2722
2723 kfree_skb(skb);
2724 }
2725
2726 /* SCO data packet */
2727 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2728 {
2729 struct hci_sco_hdr *hdr = (void *) skb->data;
2730 struct hci_conn *conn;
2731 __u16 handle;
2732
2733 skb_pull(skb, HCI_SCO_HDR_SIZE);
2734
2735 handle = __le16_to_cpu(hdr->handle);
2736
2737 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2738
2739 hdev->stat.sco_rx++;
2740
2741 hci_dev_lock(hdev);
2742 conn = hci_conn_hash_lookup_handle(hdev, handle);
2743 hci_dev_unlock(hdev);
2744
2745 if (conn) {
2746 /* Send to upper protocol */
2747 sco_recv_scodata(conn, skb);
2748 return;
2749 } else {
2750 BT_ERR("%s SCO packet for unknown connection handle %d",
2751 hdev->name, handle);
2752 }
2753
2754 kfree_skb(skb);
2755 }
2756
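/* RX work: drains hdev->rx_q. Every frame is first copied to the monitor
 * interface and, in promiscuous mode, to listening sockets. Frames are
 * then dropped entirely in raw mode, data packets are dropped while the
 * device is still initializing, and the rest is demultiplexed by packet
 * type to the event, ACL data and SCO data handlers.
 */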
2757 static void hci_rx_work(struct work_struct *work)
2758 {
2759 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2760 struct sk_buff *skb;
2761
2762 BT_DBG("%s", hdev->name);
2763
2764 while ((skb = skb_dequeue(&hdev->rx_q))) {
2765 /* Send copy to monitor */
2766 hci_send_to_monitor(hdev, skb);
2767
2768 if (atomic_read(&hdev->promisc)) {
2769 /* Send copy to the sockets */
2770 hci_send_to_sock(hdev, skb);
2771 }
2772
2773 if (test_bit(HCI_RAW, &hdev->flags)) {
2774 kfree_skb(skb);
2775 continue;
2776 }
2777
2778 if (test_bit(HCI_INIT, &hdev->flags)) {
2779 			/* Don't process data packets in this state. */
2780 switch (bt_cb(skb)->pkt_type) {
2781 case HCI_ACLDATA_PKT:
2782 case HCI_SCODATA_PKT:
2783 kfree_skb(skb);
2784 continue;
2785 }
2786 }
2787
2788 /* Process frame */
2789 switch (bt_cb(skb)->pkt_type) {
2790 case HCI_EVENT_PKT:
2791 BT_DBG("%s Event packet", hdev->name);
2792 hci_event_packet(hdev, skb);
2793 break;
2794
2795 case HCI_ACLDATA_PKT:
2796 BT_DBG("%s ACL data packet", hdev->name);
2797 hci_acldata_packet(hdev, skb);
2798 break;
2799
2800 case HCI_SCODATA_PKT:
2801 BT_DBG("%s SCO data packet", hdev->name);
2802 hci_scodata_packet(hdev, skb);
2803 break;
2804
2805 default:
2806 kfree_skb(skb);
2807 break;
2808 }
2809 }
2810 }
2811
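/* Command work: sends the next queued HCI command once the controller has
 * a free command credit (hdev->cmd_cnt). A clone of the command is kept in
 * hdev->sent_cmd so the matching Command Complete/Status event can be
 * paired with it, and cmd_timer is rearmed to catch controllers that never
 * respond (it is stopped instead while an HCI reset is in flight). If the
 * clone cannot be allocated, the command is requeued and the work
 * rescheduled.
 */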
2812 static void hci_cmd_work(struct work_struct *work)
2813 {
2814 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2815 struct sk_buff *skb;
2816
2817 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2818 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2819
2820 /* Send queued commands */
2821 if (atomic_read(&hdev->cmd_cnt)) {
2822 skb = skb_dequeue(&hdev->cmd_q);
2823 if (!skb)
2824 return;
2825
2826 kfree_skb(hdev->sent_cmd);
2827
2828 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2829 if (hdev->sent_cmd) {
2830 atomic_dec(&hdev->cmd_cnt);
2831 hci_send_frame(skb);
2832 if (test_bit(HCI_RESET, &hdev->flags))
2833 del_timer(&hdev->cmd_timer);
2834 else
2835 mod_timer(&hdev->cmd_timer,
2836 jiffies + HCI_CMD_TIMEOUT);
2837 } else {
2838 skb_queue_head(&hdev->cmd_q, skb);
2839 queue_work(hdev->workqueue, &hdev->cmd_work);
2840 }
2841 }
2842 }
2843
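/* Start a standard inquiry using the General Inquiry Access Code (GIAC,
 * LAP 0x9e8b33). The length parameter is expressed in units of 1.28
 * seconds as defined for the HCI Inquiry command, so e.g. length = 8 asks
 * the controller to scan for roughly 10.24 seconds.
 */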
2844 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2845 {
2846 /* General inquiry access code (GIAC) */
2847 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2848 struct hci_cp_inquiry cp;
2849
2850 BT_DBG("%s", hdev->name);
2851
2852 if (test_bit(HCI_INQUIRY, &hdev->flags))
2853 return -EINPROGRESS;
2854
2855 inquiry_cache_flush(hdev);
2856
2857 memset(&cp, 0, sizeof(cp));
2858 memcpy(&cp.lap, lap, sizeof(cp.lap));
2859 cp.length = length;
2860
2861 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2862 }
2863
2864 int hci_cancel_inquiry(struct hci_dev *hdev)
2865 {
2866 BT_DBG("%s", hdev->name);
2867
2868 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2869 return -EALREADY;
2870
2871 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2872 }
2873
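/* Translate the address type constants used by the management interface
 * (BDADDR_LE_PUBLIC / BDADDR_LE_RANDOM) into the HCI-level ADDR_LE_DEV_*
 * values; anything unrecognized is treated as a random address.
 */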
2874 u8 bdaddr_to_le(u8 bdaddr_type)
2875 {
2876 switch (bdaddr_type) {
2877 case BDADDR_LE_PUBLIC:
2878 return ADDR_LE_DEV_PUBLIC;
2879
2880 default:
2881 /* Fallback to LE Random address type */
2882 return ADDR_LE_DEV_RANDOM;
2883 }
2884 }