Bluetooth: debug: Print refcnt for hci_dev
net/bluetooth/hci_core.c (mt8127/android_kernel_alcatel_ttab.git)
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/idr.h>
30
31#include <linux/rfkill.h>
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
36static void hci_rx_work(struct work_struct *work);
37static void hci_cmd_work(struct work_struct *work);
38static void hci_tx_work(struct work_struct *work);
39
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
51/* ---- HCI notifications ---- */
52
53static void hci_notify(struct hci_dev *hdev, int event)
54{
55 hci_sock_dev_event(hdev, event);
56}
57
58/* ---- HCI requests ---- */
59
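/* Called when a command completes. While HCI_INIT is set, completions that
 * do not match the last init command are ignored, except that a spontaneous
 * reset complete (seen on some CSR controllers) makes us resend whatever
 * command was sent last. Outside of that, a pending synchronous request is
 * marked done and its waiter is woken up.
 */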
60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61{
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63
64 /* If this is the init phase, check if the completed command matches
65 * the last init command, and if not just return.
66 */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
134 err = -bt_to_errno(hdev->req_result);
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
144 }
145
146 hdev->req_status = hdev->req_result = 0;
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
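/* Serialized wrapper around __hci_request(): takes the request lock so only
 * one request runs at a time, and fails with -ENETDOWN if the device is not
 * up.
 */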
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
156{
157 int ret;
158
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177}
178
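/* Default BR/EDR controller bring-up: the mandatory reads (local features,
 * local version, buffer size, BD address, class of device, local name and
 * voice setting) followed by optional setup (clear all event filters, set
 * the connection accept timeout to ~20 seconds and delete all stored link
 * keys).
 */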
179static void bredr_init(struct hci_dev *hdev)
180{
181 struct hci_cp_delete_stored_link_key cp;
182 __le16 param;
183 __u8 flt_type;
184
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
187 /* Mandatory initialization */
188
189 /* Read Local Supported Features */
190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
191
192 /* Read Local Version */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
194
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
197
198 /* Read BD Address */
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
206
207 /* Read Voice Setting */
208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
213 flt_type = HCI_FLT_CLEAR_ALL;
214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
215
216 /* Connection accept timeout ~20 secs */
217 param = __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
223}
224
225static void amp_init(struct hci_dev *hdev)
226{
227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228
229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
231
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
234}
235
236static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
237{
238 struct sk_buff *skb;
239
240 BT_DBG("%s %ld", hdev->name, opt);
241
242 /* Driver initialization */
243
244 /* Special commands */
245 while ((skb = skb_dequeue(&hdev->driver_init))) {
246 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
247 skb->dev = (void *) hdev;
248
249 skb_queue_tail(&hdev->cmd_q, skb);
250 queue_work(hdev->workqueue, &hdev->cmd_work);
251 }
252 skb_queue_purge(&hdev->driver_init);
253
254 /* Reset */
255 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
256 hci_reset_req(hdev, 0);
257
258 switch (hdev->dev_type) {
259 case HCI_BREDR:
260 bredr_init(hdev);
261 break;
262
263 case HCI_AMP:
264 amp_init(hdev);
265 break;
266
267 default:
268 BT_ERR("Unknown device type %d", hdev->dev_type);
269 break;
270 }
271}
272
273static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
274{
275 BT_DBG("%s", hdev->name);
276
277 /* Read LE buffer size */
278 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
279}
280
281static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282{
283 __u8 scan = opt;
284
285 BT_DBG("%s %x", hdev->name, scan);
286
287 /* Inquiry and Page scans */
288 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
289}
290
291static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292{
293 __u8 auth = opt;
294
295 BT_DBG("%s %x", hdev->name, auth);
296
297 /* Authentication */
298 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
299}
300
301static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302{
303 __u8 encrypt = opt;
304
305 BT_DBG("%s %x", hdev->name, encrypt);
306
307 /* Encryption */
308 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
309}
310
311static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
312{
313 __le16 policy = cpu_to_le16(opt);
314
315 BT_DBG("%s %x", hdev->name, policy);
316
317 /* Default link policy */
318 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
319}
320
321/* Get HCI device by index.
322 * Device is held on return. */
323struct hci_dev *hci_dev_get(int index)
324{
325 struct hci_dev *hdev = NULL, *d;
326
327 BT_DBG("%d", index);
328
329 if (index < 0)
330 return NULL;
331
332 read_lock(&hci_dev_list_lock);
333 list_for_each_entry(d, &hci_dev_list, list) {
334 if (d->id == index) {
335 hdev = hci_dev_hold(d);
336 break;
337 }
338 }
339 read_unlock(&hci_dev_list_lock);
340 return hdev;
341}
342
343/* ---- Inquiry support ---- */
344
345bool hci_discovery_active(struct hci_dev *hdev)
346{
347 struct discovery_state *discov = &hdev->discovery;
348
349 switch (discov->state) {
350 case DISCOVERY_FINDING:
351 case DISCOVERY_RESOLVING:
352 return true;
353
354 default:
355 return false;
356 }
357}
358
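/* Update the discovery state machine and tell the management interface when
 * discovery effectively starts (FINDING) or stops (STOPPED, unless we were
 * still only STARTING).
 */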
359void hci_discovery_set_state(struct hci_dev *hdev, int state)
360{
361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
362
363 if (hdev->discovery.state == state)
364 return;
365
366 switch (state) {
367 case DISCOVERY_STOPPED:
368 if (hdev->discovery.state != DISCOVERY_STARTING)
369 mgmt_discovering(hdev, 0);
370 break;
371 case DISCOVERY_STARTING:
372 break;
373 case DISCOVERY_FINDING:
374 mgmt_discovering(hdev, 1);
375 break;
376 case DISCOVERY_RESOLVING:
377 break;
378 case DISCOVERY_STOPPING:
379 break;
380 }
381
382 hdev->discovery.state = state;
383}
384
385static void inquiry_cache_flush(struct hci_dev *hdev)
386{
387 struct discovery_state *cache = &hdev->discovery;
388 struct inquiry_entry *p, *n;
389
390 list_for_each_entry_safe(p, n, &cache->all, all) {
391 list_del(&p->all);
392 kfree(p);
393 }
394
395 INIT_LIST_HEAD(&cache->unknown);
396 INIT_LIST_HEAD(&cache->resolve);
397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
400 bdaddr_t *bdaddr)
401{
402 struct discovery_state *cache = &hdev->discovery;
403 struct inquiry_entry *e;
404
405 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
406
407 list_for_each_entry(e, &cache->all, all) {
408 if (!bacmp(&e->data.bdaddr, bdaddr))
409 return e;
410 }
411
412 return NULL;
413}
414
415struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
416 bdaddr_t *bdaddr)
417{
418 struct discovery_state *cache = &hdev->discovery;
419 struct inquiry_entry *e;
420
421 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
422
423 list_for_each_entry(e, &cache->unknown, list) {
424 if (!bacmp(&e->data.bdaddr, bdaddr))
425 return e;
426 }
427
428 return NULL;
429}
430
431struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
432 bdaddr_t *bdaddr,
433 int state)
434{
435 struct discovery_state *cache = &hdev->discovery;
436 struct inquiry_entry *e;
437
438 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
439
440 list_for_each_entry(e, &cache->resolve, list) {
441 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
442 return e;
443 if (!bacmp(&e->data.bdaddr, bdaddr))
444 return e;
445 }
446
447 return NULL;
448}
449
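/* Re-insert an inquiry entry into the resolve list so that it stays ordered
 * by signal strength: name resolution is attempted for the strongest
 * responses first, and entries whose name request is already pending are
 * not overtaken.
 */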
450void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
451 struct inquiry_entry *ie)
452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct list_head *pos = &cache->resolve;
455 struct inquiry_entry *p;
456
457 list_del(&ie->list);
458
459 list_for_each_entry(p, &cache->resolve, list) {
460 if (p->name_state != NAME_PENDING &&
461 abs(p->data.rssi) >= abs(ie->data.rssi))
462 break;
463 pos = &p->list;
464 }
465
466 list_add(&ie->list, pos);
467}
468
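/* Add a new inquiry result to the cache or refresh an existing entry.
 * Returns false when the entry still has an unknown name (or could not be
 * allocated), true otherwise; *ssp reports whether the device indicated
 * Simple Pairing support.
 */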
469bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
470 bool name_known, bool *ssp)
471{
472 struct discovery_state *cache = &hdev->discovery;
473 struct inquiry_entry *ie;
474
475 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
476
477 if (ssp)
478 *ssp = data->ssp_mode;
479
480 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
481 if (ie) {
482 if (ie->data.ssp_mode && ssp)
483 *ssp = true;
484
485 if (ie->name_state == NAME_NEEDED &&
486 data->rssi != ie->data.rssi) {
487 ie->data.rssi = data->rssi;
488 hci_inquiry_cache_update_resolve(hdev, ie);
489 }
490
491 goto update;
492 }
493
494 /* Entry not in the cache. Add new one. */
495 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
496 if (!ie)
497 return false;
498
499 list_add(&ie->all, &cache->all);
500
501 if (name_known) {
502 ie->name_state = NAME_KNOWN;
503 } else {
504 ie->name_state = NAME_NOT_KNOWN;
505 list_add(&ie->list, &cache->unknown);
506 }
507
508update:
509 if (name_known && ie->name_state != NAME_KNOWN &&
510 ie->name_state != NAME_PENDING) {
511 ie->name_state = NAME_KNOWN;
512 list_del(&ie->list);
513 }
514
515 memcpy(&ie->data, data, sizeof(*data));
516 ie->timestamp = jiffies;
517 cache->timestamp = jiffies;
518
519 if (ie->name_state == NAME_NOT_KNOWN)
520 return false;
521
522 return true;
523}
524
525static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
526{
527 struct discovery_state *cache = &hdev->discovery;
528 struct inquiry_info *info = (struct inquiry_info *) buf;
529 struct inquiry_entry *e;
530 int copied = 0;
531
532 list_for_each_entry(e, &cache->all, all) {
533 struct inquiry_data *data = &e->data;
534
535 if (copied >= num)
536 break;
537
538 bacpy(&info->bdaddr, &data->bdaddr);
539 info->pscan_rep_mode = data->pscan_rep_mode;
540 info->pscan_period_mode = data->pscan_period_mode;
541 info->pscan_mode = data->pscan_mode;
542 memcpy(info->dev_class, data->dev_class, 3);
543 info->clock_offset = data->clock_offset;
544
545 info++;
546 copied++;
547 }
548
549 BT_DBG("cache %p, copied %d", cache, copied);
550 return copied;
551}
552
553static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
554{
555 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
556 struct hci_cp_inquiry cp;
557
558 BT_DBG("%s", hdev->name);
559
560 if (test_bit(HCI_INQUIRY, &hdev->flags))
561 return;
562
563 /* Start Inquiry */
564 memcpy(&cp.lap, &ir->lap, 3);
565 cp.length = ir->length;
566 cp.num_rsp = ir->num_rsp;
567 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
568}
569
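/* Inquiry ioctl helper: copy the request from user space, flush the cache
 * and run a fresh inquiry if the cached results are stale (or a flush was
 * explicitly requested), then copy up to max_rsp cached entries back to the
 * caller.
 */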
570int hci_inquiry(void __user *arg)
571{
572 __u8 __user *ptr = arg;
573 struct hci_inquiry_req ir;
574 struct hci_dev *hdev;
575 int err = 0, do_inquiry = 0, max_rsp;
576 long timeo;
577 __u8 *buf;
578
579 if (copy_from_user(&ir, ptr, sizeof(ir)))
580 return -EFAULT;
581
582 hdev = hci_dev_get(ir.dev_id);
583 if (!hdev)
584 return -ENODEV;
585
586 hci_dev_lock(hdev);
587 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
588 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
589 inquiry_cache_flush(hdev);
590 do_inquiry = 1;
591 }
592 hci_dev_unlock(hdev);
593
594 timeo = ir.length * msecs_to_jiffies(2000);
595
596 if (do_inquiry) {
597 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
598 if (err < 0)
599 goto done;
600 }
601
602 /* For an unlimited number of responses we will use a buffer with
603 * 255 entries
604 */
605 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
606
607 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
608 * copy it to user space.
609 */
610 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
611 if (!buf) {
612 err = -ENOMEM;
613 goto done;
614 }
615
616 hci_dev_lock(hdev);
617 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
618 hci_dev_unlock(hdev);
619
620 BT_DBG("num_rsp %d", ir.num_rsp);
621
622 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
623 ptr += sizeof(ir);
624 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
625 ir.num_rsp))
626 err = -EFAULT;
627 } else
628 err = -EFAULT;
629
630 kfree(buf);
631
632done:
633 hci_dev_put(hdev);
634 return err;
635}
636
637/* ---- HCI ioctl helpers ---- */
638
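/* Bring a device up: refuse if it is being unregistered, blocked by rfkill
 * or already up, call the driver's open() and, unless the device is marked
 * raw, run the HCI (and, if supported, LE) init requests. On success the
 * device is held and mgmt is told it is powered; on failure all work and
 * queues are flushed and the driver is closed again.
 */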
639int hci_dev_open(__u16 dev)
640{
641 struct hci_dev *hdev;
642 int ret = 0;
643
644 hdev = hci_dev_get(dev);
645 if (!hdev)
646 return -ENODEV;
647
648 BT_DBG("%s %p", hdev->name, hdev);
649
650 hci_req_lock(hdev);
651
652 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
653 ret = -ENODEV;
654 goto done;
655 }
656
657 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
658 ret = -ERFKILL;
659 goto done;
660 }
661
662 if (test_bit(HCI_UP, &hdev->flags)) {
663 ret = -EALREADY;
664 goto done;
665 }
666
667 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
668 set_bit(HCI_RAW, &hdev->flags);
669
670 /* Treat all non-BR/EDR controllers as raw devices if
671 enable_hs is not set */
672 if (hdev->dev_type != HCI_BREDR && !enable_hs)
673 set_bit(HCI_RAW, &hdev->flags);
674
675 if (hdev->open(hdev)) {
676 ret = -EIO;
677 goto done;
678 }
679
680 if (!test_bit(HCI_RAW, &hdev->flags)) {
681 atomic_set(&hdev->cmd_cnt, 1);
682 set_bit(HCI_INIT, &hdev->flags);
683 hdev->init_last_cmd = 0;
684
685 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
686
687 if (lmp_host_le_capable(hdev))
688 ret = __hci_request(hdev, hci_le_init_req, 0,
689 HCI_INIT_TIMEOUT);
690
691 clear_bit(HCI_INIT, &hdev->flags);
692 }
693
694 if (!ret) {
695 hci_dev_hold(hdev);
696 set_bit(HCI_UP, &hdev->flags);
697 hci_notify(hdev, HCI_DEV_UP);
698 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
699 mgmt_valid_hdev(hdev)) {
700 hci_dev_lock(hdev);
701 mgmt_powered(hdev, 1);
702 hci_dev_unlock(hdev);
703 }
704 } else {
705 /* Init failed, cleanup */
706 flush_work(&hdev->tx_work);
707 flush_work(&hdev->cmd_work);
708 flush_work(&hdev->rx_work);
709
710 skb_queue_purge(&hdev->cmd_q);
711 skb_queue_purge(&hdev->rx_q);
712
713 if (hdev->flush)
714 hdev->flush(hdev);
715
716 if (hdev->sent_cmd) {
717 kfree_skb(hdev->sent_cmd);
718 hdev->sent_cmd = NULL;
719 }
720
721 hdev->close(hdev);
722 hdev->flags = 0;
723 }
724
725done:
726 hci_req_unlock(hdev);
727 hci_dev_put(hdev);
728 return ret;
729}
730
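/* Common close path: cancel pending requests, work and timers, flush the
 * RX/TX/command queues, optionally reset the controller (when
 * HCI_QUIRK_RESET_ON_CLOSE is set), call the driver's close() and drop the
 * reference taken in hci_dev_open().
 */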
731static int hci_dev_do_close(struct hci_dev *hdev)
732{
733 BT_DBG("%s %p", hdev->name, hdev);
734
735 cancel_work_sync(&hdev->le_scan);
736
737 hci_req_cancel(hdev, ENODEV);
738 hci_req_lock(hdev);
739
740 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
741 del_timer_sync(&hdev->cmd_timer);
742 hci_req_unlock(hdev);
743 return 0;
744 }
745
746 /* Flush RX and TX works */
747 flush_work(&hdev->tx_work);
748 flush_work(&hdev->rx_work);
749
750 if (hdev->discov_timeout > 0) {
751 cancel_delayed_work(&hdev->discov_off);
752 hdev->discov_timeout = 0;
753 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
754 }
755
756 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
757 cancel_delayed_work(&hdev->service_cache);
758
759 cancel_delayed_work_sync(&hdev->le_scan_disable);
760
761 hci_dev_lock(hdev);
762 inquiry_cache_flush(hdev);
763 hci_conn_hash_flush(hdev);
764 hci_dev_unlock(hdev);
765
766 hci_notify(hdev, HCI_DEV_DOWN);
767
768 if (hdev->flush)
769 hdev->flush(hdev);
770
771 /* Reset device */
772 skb_queue_purge(&hdev->cmd_q);
773 atomic_set(&hdev->cmd_cnt, 1);
774 if (!test_bit(HCI_RAW, &hdev->flags) &&
775 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
776 set_bit(HCI_INIT, &hdev->flags);
777 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
778 clear_bit(HCI_INIT, &hdev->flags);
779 }
780
781 /* flush cmd work */
782 flush_work(&hdev->cmd_work);
783
784 /* Drop queues */
785 skb_queue_purge(&hdev->rx_q);
786 skb_queue_purge(&hdev->cmd_q);
787 skb_queue_purge(&hdev->raw_q);
788
789 /* Drop last sent command */
790 if (hdev->sent_cmd) {
791 del_timer_sync(&hdev->cmd_timer);
792 kfree_skb(hdev->sent_cmd);
793 hdev->sent_cmd = NULL;
794 }
795
796 /* After this point our queues are empty
797 * and no tasks are scheduled. */
798 hdev->close(hdev);
799
800 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
801 mgmt_valid_hdev(hdev)) {
802 hci_dev_lock(hdev);
803 mgmt_powered(hdev, 0);
804 hci_dev_unlock(hdev);
805 }
806
807 /* Clear flags */
808 hdev->flags = 0;
809
810 memset(hdev->eir, 0, sizeof(hdev->eir));
811 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
812
813 hci_req_unlock(hdev);
814
815 hci_dev_put(hdev);
816 return 0;
817}
818
819int hci_dev_close(__u16 dev)
820{
821 struct hci_dev *hdev;
822 int err;
823
824 hdev = hci_dev_get(dev);
825 if (!hdev)
826 return -ENODEV;
827
828 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
829 cancel_delayed_work(&hdev->power_off);
830
831 err = hci_dev_do_close(hdev);
832
833 hci_dev_put(hdev);
834 return err;
835}
836
837int hci_dev_reset(__u16 dev)
838{
839 struct hci_dev *hdev;
840 int ret = 0;
841
842 hdev = hci_dev_get(dev);
843 if (!hdev)
844 return -ENODEV;
845
846 hci_req_lock(hdev);
847
848 if (!test_bit(HCI_UP, &hdev->flags))
849 goto done;
850
851 /* Drop queues */
852 skb_queue_purge(&hdev->rx_q);
853 skb_queue_purge(&hdev->cmd_q);
854
855 hci_dev_lock(hdev);
856 inquiry_cache_flush(hdev);
857 hci_conn_hash_flush(hdev);
858 hci_dev_unlock(hdev);
859
860 if (hdev->flush)
861 hdev->flush(hdev);
862
863 atomic_set(&hdev->cmd_cnt, 1);
864 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
865
866 if (!test_bit(HCI_RAW, &hdev->flags))
867 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
868
869done:
870 hci_req_unlock(hdev);
871 hci_dev_put(hdev);
872 return ret;
873}
874
875int hci_dev_reset_stat(__u16 dev)
876{
877 struct hci_dev *hdev;
878 int ret = 0;
879
880 hdev = hci_dev_get(dev);
881 if (!hdev)
882 return -ENODEV;
883
884 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
885
886 hci_dev_put(hdev);
887
888 return ret;
889}
890
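/* Handle the per-device HCISET* ioctls: settings that require talking to
 * the controller (auth, encryption, scan mode, link policy) are issued via
 * hci_request(), the rest simply update the cached values in struct
 * hci_dev.
 */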
891int hci_dev_cmd(unsigned int cmd, void __user *arg)
892{
893 struct hci_dev *hdev;
894 struct hci_dev_req dr;
895 int err = 0;
896
897 if (copy_from_user(&dr, arg, sizeof(dr)))
898 return -EFAULT;
899
900 hdev = hci_dev_get(dr.dev_id);
901 if (!hdev)
902 return -ENODEV;
903
904 switch (cmd) {
905 case HCISETAUTH:
906 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
907 HCI_INIT_TIMEOUT);
908 break;
909
910 case HCISETENCRYPT:
911 if (!lmp_encrypt_capable(hdev)) {
912 err = -EOPNOTSUPP;
913 break;
914 }
915
916 if (!test_bit(HCI_AUTH, &hdev->flags)) {
917 /* Auth must be enabled first */
918 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
919 HCI_INIT_TIMEOUT);
920 if (err)
921 break;
922 }
923
924 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
925 HCI_INIT_TIMEOUT);
926 break;
927
928 case HCISETSCAN:
929 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
930 HCI_INIT_TIMEOUT);
931 break;
932
933 case HCISETLINKPOL:
934 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
935 HCI_INIT_TIMEOUT);
936 break;
937
938 case HCISETLINKMODE:
939 hdev->link_mode = ((__u16) dr.dev_opt) &
940 (HCI_LM_MASTER | HCI_LM_ACCEPT);
941 break;
942
943 case HCISETPTYPE:
944 hdev->pkt_type = (__u16) dr.dev_opt;
945 break;
946
947 case HCISETACLMTU:
948 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
949 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
950 break;
951
952 case HCISETSCOMTU:
953 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
955 break;
956
957 default:
958 err = -EINVAL;
959 break;
960 }
961
962 hci_dev_put(hdev);
963 return err;
964}
965
966int hci_get_dev_list(void __user *arg)
967{
968 struct hci_dev *hdev;
969 struct hci_dev_list_req *dl;
970 struct hci_dev_req *dr;
971 int n = 0, size, err;
972 __u16 dev_num;
973
974 if (get_user(dev_num, (__u16 __user *) arg))
975 return -EFAULT;
976
977 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
978 return -EINVAL;
979
980 size = sizeof(*dl) + dev_num * sizeof(*dr);
981
982 dl = kzalloc(size, GFP_KERNEL);
983 if (!dl)
984 return -ENOMEM;
985
986 dr = dl->dev_req;
987
988 read_lock(&hci_dev_list_lock);
989 list_for_each_entry(hdev, &hci_dev_list, list) {
990 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
991 cancel_delayed_work(&hdev->power_off);
992
993 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
994 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
995
996 (dr + n)->dev_id = hdev->id;
997 (dr + n)->dev_opt = hdev->flags;
998
999 if (++n >= dev_num)
1000 break;
1001 }
1002 read_unlock(&hci_dev_list_lock);
1003
1004 dl->dev_num = n;
1005 size = sizeof(*dl) + n * sizeof(*dr);
1006
1007 err = copy_to_user(arg, dl, size);
1008 kfree(dl);
1009
1010 return err ? -EFAULT : 0;
1011}
1012
1013int hci_get_dev_info(void __user *arg)
1014{
1015 struct hci_dev *hdev;
1016 struct hci_dev_info di;
1017 int err = 0;
1018
1019 if (copy_from_user(&di, arg, sizeof(di)))
1020 return -EFAULT;
1021
1022 hdev = hci_dev_get(di.dev_id);
1023 if (!hdev)
1024 return -ENODEV;
1025
1026 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1027 cancel_delayed_work_sync(&hdev->power_off);
1028
1029 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1030 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1031
1032 strcpy(di.name, hdev->name);
1033 di.bdaddr = hdev->bdaddr;
1034 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1035 di.flags = hdev->flags;
1036 di.pkt_type = hdev->pkt_type;
1037 di.acl_mtu = hdev->acl_mtu;
1038 di.acl_pkts = hdev->acl_pkts;
1039 di.sco_mtu = hdev->sco_mtu;
1040 di.sco_pkts = hdev->sco_pkts;
1041 di.link_policy = hdev->link_policy;
1042 di.link_mode = hdev->link_mode;
1043
1044 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1045 memcpy(&di.features, &hdev->features, sizeof(di.features));
1046
1047 if (copy_to_user(arg, &di, sizeof(di)))
1048 err = -EFAULT;
1049
1050 hci_dev_put(hdev);
1051
1052 return err;
1053}
1054
1055/* ---- Interface to HCI drivers ---- */
1056
1057static int hci_rfkill_set_block(void *data, bool blocked)
1058{
1059 struct hci_dev *hdev = data;
1060
1061 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1062
1063 if (!blocked)
1064 return 0;
1065
1066 hci_dev_do_close(hdev);
1067
1068 return 0;
1069}
1070
1071static const struct rfkill_ops hci_rfkill_ops = {
1072 .set_block = hci_rfkill_set_block,
1073};
1074
1075static void hci_power_on(struct work_struct *work)
1076{
1077 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1078
1079 BT_DBG("%s", hdev->name);
1080
1081 if (hci_dev_open(hdev->id) < 0)
1082 return;
1083
1084 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1085 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1086
1087 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1088 mgmt_index_added(hdev);
1089}
1090
1091static void hci_power_off(struct work_struct *work)
1092{
1093 struct hci_dev *hdev = container_of(work, struct hci_dev,
1094 power_off.work);
1095
1096 BT_DBG("%s", hdev->name);
1097
1098 hci_dev_do_close(hdev);
1099}
1100
1101static void hci_discov_off(struct work_struct *work)
1102{
1103 struct hci_dev *hdev;
1104 u8 scan = SCAN_PAGE;
1105
1106 hdev = container_of(work, struct hci_dev, discov_off.work);
1107
1108 BT_DBG("%s", hdev->name);
1109
1110 hci_dev_lock(hdev);
1111
1112 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1113
1114 hdev->discov_timeout = 0;
1115
1116 hci_dev_unlock(hdev);
1117}
1118
1119int hci_uuids_clear(struct hci_dev *hdev)
1120{
1121 struct list_head *p, *n;
1122
1123 list_for_each_safe(p, n, &hdev->uuids) {
1124 struct bt_uuid *uuid;
1125
1126 uuid = list_entry(p, struct bt_uuid, list);
1127
1128 list_del(p);
1129 kfree(uuid);
1130 }
1131
1132 return 0;
1133}
1134
1135int hci_link_keys_clear(struct hci_dev *hdev)
1136{
1137 struct list_head *p, *n;
1138
1139 list_for_each_safe(p, n, &hdev->link_keys) {
1140 struct link_key *key;
1141
1142 key = list_entry(p, struct link_key, list);
1143
1144 list_del(p);
1145 kfree(key);
1146 }
1147
1148 return 0;
1149}
1150
1151int hci_smp_ltks_clear(struct hci_dev *hdev)
1152{
1153 struct smp_ltk *k, *tmp;
1154
1155 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1156 list_del(&k->list);
1157 kfree(k);
1158 }
1159
1160 return 0;
1161}
1162
1163struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1164{
1165 struct link_key *k;
1166
1167 list_for_each_entry(k, &hdev->link_keys, list)
1168 if (bacmp(bdaddr, &k->bdaddr) == 0)
1169 return k;
1170
1171 return NULL;
1172}
1173
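/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the bonding requirements of both sides of the
 * connection.
 */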
1174static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1175 u8 key_type, u8 old_key_type)
1176{
1177 /* Legacy key */
1178 if (key_type < 0x03)
1179 return true;
1180
1181 /* Debug keys are insecure so don't store them persistently */
1182 if (key_type == HCI_LK_DEBUG_COMBINATION)
1183 return false;
1184
1185 /* Changed combination key and there's no previous one */
1186 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1187 return false;
1188
1189 /* Security mode 3 case */
1190 if (!conn)
1191 return true;
1192
1193 /* Neither local nor remote side had no-bonding as a requirement */
1194 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1195 return true;
1196
1197 /* Local side had dedicated bonding as requirement */
1198 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1199 return true;
1200
1201 /* Remote side had dedicated bonding as requirement */
1202 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1203 return true;
1204
1205 /* If none of the above criteria match, then don't store the key
1206 * persistently */
1207 return false;
1208}
1209
1210struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1211{
1212 struct smp_ltk *k;
1213
1214 list_for_each_entry(k, &hdev->long_term_keys, list) {
1215 if (k->ediv != ediv ||
1216 memcmp(rand, k->rand, sizeof(k->rand)))
1217 continue;
1218
1219 return k;
1220 }
1221
1222 return NULL;
1223}
1224
1225struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1226 u8 addr_type)
1227{
1228 struct smp_ltk *k;
1229
1230 list_for_each_entry(k, &hdev->long_term_keys, list)
1231 if (addr_type == k->bdaddr_type &&
1232 bacmp(bdaddr, &k->bdaddr) == 0)
1233 return k;
1234
1235 return NULL;
1236}
1237
1238int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1239 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1240{
1241 struct link_key *key, *old_key;
1242 u8 old_key_type;
1243 bool persistent;
1244
1245 old_key = hci_find_link_key(hdev, bdaddr);
1246 if (old_key) {
1247 old_key_type = old_key->type;
1248 key = old_key;
1249 } else {
1250 old_key_type = conn ? conn->key_type : 0xff;
1251 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1252 if (!key)
1253 return -ENOMEM;
1254 list_add(&key->list, &hdev->link_keys);
1255 }
1256
1257 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1258
1259 /* Some buggy controller combinations generate a changed
1260 * combination key for legacy pairing even when there's no
1261 * previous key */
1262 if (type == HCI_LK_CHANGED_COMBINATION &&
1263 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1264 type = HCI_LK_COMBINATION;
1265 if (conn)
1266 conn->key_type = type;
1267 }
1268
1269 bacpy(&key->bdaddr, bdaddr);
1270 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1271 key->pin_len = pin_len;
1272
1273 if (type == HCI_LK_CHANGED_COMBINATION)
1274 key->type = old_key_type;
1275 else
1276 key->type = type;
1277
1278 if (!new_key)
1279 return 0;
1280
1281 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1282
1283 mgmt_new_link_key(hdev, key, persistent);
1284
1285 if (conn)
1286 conn->flush_key = !persistent;
1287
1288 return 0;
1289}
1290
1291int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1292 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1293 ediv, u8 rand[8])
1294{
1295 struct smp_ltk *key, *old_key;
1296
1297 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1298 return 0;
1299
1300 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1301 if (old_key)
1302 key = old_key;
1303 else {
1304 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1305 if (!key)
1306 return -ENOMEM;
1307 list_add(&key->list, &hdev->long_term_keys);
1308 }
1309
1310 bacpy(&key->bdaddr, bdaddr);
1311 key->bdaddr_type = addr_type;
1312 memcpy(key->val, tk, sizeof(key->val));
1313 key->authenticated = authenticated;
1314 key->ediv = ediv;
1315 key->enc_size = enc_size;
1316 key->type = type;
1317 memcpy(key->rand, rand, sizeof(key->rand));
1318
1319 if (!new_key)
1320 return 0;
1321
1322 if (type & HCI_SMP_LTK)
1323 mgmt_new_ltk(hdev, key, 1);
1324
1325 return 0;
1326}
1327
1328int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1329{
1330 struct link_key *key;
1331
1332 key = hci_find_link_key(hdev, bdaddr);
1333 if (!key)
1334 return -ENOENT;
1335
1336 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1337
1338 list_del(&key->list);
1339 kfree(key);
1340
1341 return 0;
1342}
1343
1344int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{
1346 struct smp_ltk *k, *tmp;
1347
1348 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1349 if (bacmp(bdaddr, &k->bdaddr))
1350 continue;
1351
1352 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1353
1354 list_del(&k->list);
1355 kfree(k);
1356 }
1357
1358 return 0;
1359}
1360
1361/* HCI command timer function */
1362static void hci_cmd_timeout(unsigned long arg)
1363{
1364 struct hci_dev *hdev = (void *) arg;
1365
1366 if (hdev->sent_cmd) {
1367 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1368 u16 opcode = __le16_to_cpu(sent->opcode);
1369
1370 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1371 } else {
1372 BT_ERR("%s command tx timeout", hdev->name);
1373 }
1374
1375 atomic_set(&hdev->cmd_cnt, 1);
1376 queue_work(hdev->workqueue, &hdev->cmd_work);
1377}
1378
1379struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1380 bdaddr_t *bdaddr)
1381{
1382 struct oob_data *data;
1383
1384 list_for_each_entry(data, &hdev->remote_oob_data, list)
1385 if (bacmp(bdaddr, &data->bdaddr) == 0)
1386 return data;
1387
1388 return NULL;
1389}
1390
1391int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1392{
1393 struct oob_data *data;
1394
1395 data = hci_find_remote_oob_data(hdev, bdaddr);
1396 if (!data)
1397 return -ENOENT;
1398
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1400
1401 list_del(&data->list);
1402 kfree(data);
1403
1404 return 0;
1405}
1406
1407int hci_remote_oob_data_clear(struct hci_dev *hdev)
1408{
1409 struct oob_data *data, *n;
1410
1411 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1412 list_del(&data->list);
1413 kfree(data);
1414 }
1415
1416 return 0;
1417}
1418
1419int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1420 u8 *randomizer)
1421{
1422 struct oob_data *data;
1423
1424 data = hci_find_remote_oob_data(hdev, bdaddr);
1425
1426 if (!data) {
1427 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1428 if (!data)
1429 return -ENOMEM;
1430
1431 bacpy(&data->bdaddr, bdaddr);
1432 list_add(&data->list, &hdev->remote_oob_data);
1433 }
1434
1435 memcpy(data->hash, hash, sizeof(data->hash));
1436 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1437
1438 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1439
1440 return 0;
1441}
1442
1443struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1444{
1445 struct bdaddr_list *b;
1446
1447 list_for_each_entry(b, &hdev->blacklist, list)
1448 if (bacmp(bdaddr, &b->bdaddr) == 0)
1449 return b;
1450
1451 return NULL;
1452}
1453
1454int hci_blacklist_clear(struct hci_dev *hdev)
1455{
1456 struct list_head *p, *n;
1457
1458 list_for_each_safe(p, n, &hdev->blacklist) {
1459 struct bdaddr_list *b;
1460
1461 b = list_entry(p, struct bdaddr_list, list);
1462
1463 list_del(p);
1464 kfree(b);
1465 }
1466
1467 return 0;
1468}
1469
1470int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1471{
1472 struct bdaddr_list *entry;
1473
1474 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1475 return -EBADF;
1476
1477 if (hci_blacklist_lookup(hdev, bdaddr))
1478 return -EEXIST;
1479
1480 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1481 if (!entry)
1482 return -ENOMEM;
1483
1484 bacpy(&entry->bdaddr, bdaddr);
1485
1486 list_add(&entry->list, &hdev->blacklist);
1487
1488 return mgmt_device_blocked(hdev, bdaddr, type);
1489}
1490
1491int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1492{
1493 struct bdaddr_list *entry;
1494
1495 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1496 return hci_blacklist_clear(hdev);
1497
1498 entry = hci_blacklist_lookup(hdev, bdaddr);
1499 if (!entry)
1500 return -ENOENT;
1501
1502 list_del(&entry->list);
1503 kfree(entry);
1504
1505 return mgmt_device_unblocked(hdev, bdaddr, type);
1506}
1507
1508static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1509{
1510 struct le_scan_params *param = (struct le_scan_params *) opt;
1511 struct hci_cp_le_set_scan_param cp;
1512
1513 memset(&cp, 0, sizeof(cp));
1514 cp.type = param->type;
1515 cp.interval = cpu_to_le16(param->interval);
1516 cp.window = cpu_to_le16(param->window);
1517
1518 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1519}
1520
1521static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1522{
1523 struct hci_cp_le_set_scan_enable cp;
1524
1525 memset(&cp, 0, sizeof(cp));
1526 cp.enable = 1;
1527 cp.filter_dup = 1;
1528
1529 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1530}
1531
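/* Start an LE scan: program the scan parameters, enable scanning and
 * schedule le_scan_disable to turn it off again after @timeout milliseconds.
 * Returns -EINPROGRESS if a scan is already active.
 */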
1532static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1533 u16 window, int timeout)
1534{
1535 long timeo = msecs_to_jiffies(3000);
1536 struct le_scan_params param;
1537 int err;
1538
1539 BT_DBG("%s", hdev->name);
1540
1541 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1542 return -EINPROGRESS;
1543
1544 param.type = type;
1545 param.interval = interval;
1546 param.window = window;
1547
1548 hci_req_lock(hdev);
1549
1550 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1551 timeo);
1552 if (!err)
1553 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1554
1555 hci_req_unlock(hdev);
1556
1557 if (err < 0)
1558 return err;
1559
1560 schedule_delayed_work(&hdev->le_scan_disable,
1561 msecs_to_jiffies(timeout));
1562
1563 return 0;
1564}
1565
1566int hci_cancel_le_scan(struct hci_dev *hdev)
1567{
1568 BT_DBG("%s", hdev->name);
1569
1570 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1571 return -EALREADY;
1572
1573 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1574 struct hci_cp_le_set_scan_enable cp;
1575
1576 /* Send HCI command to disable LE Scan */
1577 memset(&cp, 0, sizeof(cp));
1578 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1579 }
1580
1581 return 0;
1582}
1583
1584static void le_scan_disable_work(struct work_struct *work)
1585{
1586 struct hci_dev *hdev = container_of(work, struct hci_dev,
1587 le_scan_disable.work);
1588 struct hci_cp_le_set_scan_enable cp;
1589
1590 BT_DBG("%s", hdev->name);
1591
1592 memset(&cp, 0, sizeof(cp));
1593
1594 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1595}
1596
1597static void le_scan_work(struct work_struct *work)
1598{
1599 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1600 struct le_scan_params *param = &hdev->le_scan_params;
1601
1602 BT_DBG("%s", hdev->name);
1603
1604 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1605 param->timeout);
1606}
1607
1608int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1609 int timeout)
1610{
1611 struct le_scan_params *param = &hdev->le_scan_params;
1612
1613 BT_DBG("%s", hdev->name);
1614
1615 if (work_busy(&hdev->le_scan))
1616 return -EINPROGRESS;
1617
1618 param->type = type;
1619 param->interval = interval;
1620 param->window = window;
1621 param->timeout = timeout;
1622
1623 queue_work(system_long_wq, &hdev->le_scan);
1624
1625 return 0;
1626}
1627
1628/* Alloc HCI device */
1629struct hci_dev *hci_alloc_dev(void)
1630{
1631 struct hci_dev *hdev;
1632
1633 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1634 if (!hdev)
1635 return NULL;
1636
1637 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1638 hdev->esco_type = (ESCO_HV1);
1639 hdev->link_mode = (HCI_LM_ACCEPT);
1640 hdev->io_capability = 0x03; /* No Input No Output */
1641
1642 hdev->sniff_max_interval = 800;
1643 hdev->sniff_min_interval = 80;
1644
1645 mutex_init(&hdev->lock);
1646 mutex_init(&hdev->req_lock);
1647
1648 INIT_LIST_HEAD(&hdev->mgmt_pending);
1649 INIT_LIST_HEAD(&hdev->blacklist);
1650 INIT_LIST_HEAD(&hdev->uuids);
1651 INIT_LIST_HEAD(&hdev->link_keys);
1652 INIT_LIST_HEAD(&hdev->long_term_keys);
1653 INIT_LIST_HEAD(&hdev->remote_oob_data);
1654 INIT_LIST_HEAD(&hdev->conn_hash.list);
1655
1656 INIT_WORK(&hdev->rx_work, hci_rx_work);
1657 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1658 INIT_WORK(&hdev->tx_work, hci_tx_work);
1659 INIT_WORK(&hdev->power_on, hci_power_on);
1660 INIT_WORK(&hdev->le_scan, le_scan_work);
1661
1662 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1663 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1664 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1665
1666 skb_queue_head_init(&hdev->driver_init);
1667 skb_queue_head_init(&hdev->rx_q);
1668 skb_queue_head_init(&hdev->cmd_q);
1669 skb_queue_head_init(&hdev->raw_q);
1670
1671 init_waitqueue_head(&hdev->req_wait_q);
1672
1673 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1674
1675 hci_init_sysfs(hdev);
1676 discovery_init(hdev);
1677
1678 return hdev;
1679}
1680EXPORT_SYMBOL(hci_alloc_dev);
1681
1682/* Free HCI device */
1683void hci_free_dev(struct hci_dev *hdev)
1684{
1685 skb_queue_purge(&hdev->driver_init);
1686
1687 /* will free via device release */
1688 put_device(&hdev->dev);
1689}
1690EXPORT_SYMBOL(hci_free_dev);
1691
1692/* Register HCI device */
1693int hci_register_dev(struct hci_dev *hdev)
1694{
1695 int id, error;
1696
1697 if (!hdev->open || !hdev->close)
1698 return -EINVAL;
1699
1700 /* Do not allow HCI_AMP devices to register at index 0,
1701 * so the index can be used as the AMP controller ID.
1702 */
1703 switch (hdev->dev_type) {
1704 case HCI_BREDR:
1705 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1706 break;
1707 case HCI_AMP:
1708 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1709 break;
1710 default:
1711 return -EINVAL;
1712 }
1713
1714 if (id < 0)
1715 return id;
1716
1717 sprintf(hdev->name, "hci%d", id);
1718 hdev->id = id;
1719
1720 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1721
1722 write_lock(&hci_dev_list_lock);
1723 list_add(&hdev->list, &hci_dev_list);
1724 write_unlock(&hci_dev_list_lock);
1725
1726 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1727 WQ_MEM_RECLAIM, 1);
1728 if (!hdev->workqueue) {
1729 error = -ENOMEM;
1730 goto err;
1731 }
1732
1733 error = hci_add_sysfs(hdev);
1734 if (error < 0)
1735 goto err_wqueue;
1736
1737 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1738 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1739 hdev);
1740 if (hdev->rfkill) {
1741 if (rfkill_register(hdev->rfkill) < 0) {
1742 rfkill_destroy(hdev->rfkill);
1743 hdev->rfkill = NULL;
1744 }
1745 }
1746
1747 set_bit(HCI_SETUP, &hdev->dev_flags);
1748
1749 if (hdev->dev_type != HCI_AMP)
1750 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1751
1752 schedule_work(&hdev->power_on);
1753
1754 hci_notify(hdev, HCI_DEV_REG);
1755 hci_dev_hold(hdev);
1756
1757 return id;
1758
1759err_wqueue:
1760 destroy_workqueue(hdev->workqueue);
1761err:
1762 ida_simple_remove(&hci_index_ida, hdev->id);
1763 write_lock(&hci_dev_list_lock);
1764 list_del(&hdev->list);
1765 write_unlock(&hci_dev_list_lock);
1766
1767 return error;
1768}
1769EXPORT_SYMBOL(hci_register_dev);
1770
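/* Typical transport-driver usage of the allocation/registration API above,
 * as a minimal sketch (my_open, my_close and my_send are hypothetical
 * driver callbacks, not part of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() rejects devices without open/close callbacks, so these
 * must be set before registering.
 */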
1771/* Unregister HCI device */
1772void hci_unregister_dev(struct hci_dev *hdev)
1773{
1774 int i, id;
1775
1776 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1777
1778 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1779
1780 id = hdev->id;
1781
1782 write_lock(&hci_dev_list_lock);
1783 list_del(&hdev->list);
1784 write_unlock(&hci_dev_list_lock);
1785
1786 hci_dev_do_close(hdev);
1787
1788 for (i = 0; i < NUM_REASSEMBLY; i++)
1789 kfree_skb(hdev->reassembly[i]);
1790
1791 if (!test_bit(HCI_INIT, &hdev->flags) &&
1792 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1793 hci_dev_lock(hdev);
1794 mgmt_index_removed(hdev);
1795 hci_dev_unlock(hdev);
1796 }
1797
1798 /* mgmt_index_removed should take care of emptying the
1799 * pending list */
1800 BUG_ON(!list_empty(&hdev->mgmt_pending));
1801
1802 hci_notify(hdev, HCI_DEV_UNREG);
1803
1804 if (hdev->rfkill) {
1805 rfkill_unregister(hdev->rfkill);
1806 rfkill_destroy(hdev->rfkill);
1807 }
1808
1809 hci_del_sysfs(hdev);
1810
1811 destroy_workqueue(hdev->workqueue);
1812
1813 hci_dev_lock(hdev);
1814 hci_blacklist_clear(hdev);
1815 hci_uuids_clear(hdev);
1816 hci_link_keys_clear(hdev);
1817 hci_smp_ltks_clear(hdev);
1818 hci_remote_oob_data_clear(hdev);
1819 hci_dev_unlock(hdev);
1820
1821 hci_dev_put(hdev);
1822
1823 ida_simple_remove(&hci_index_ida, id);
1824}
1825EXPORT_SYMBOL(hci_unregister_dev);
1826
1827/* Suspend HCI device */
1828int hci_suspend_dev(struct hci_dev *hdev)
1829{
1830 hci_notify(hdev, HCI_DEV_SUSPEND);
1831 return 0;
1832}
1833EXPORT_SYMBOL(hci_suspend_dev);
1834
1835/* Resume HCI device */
1836int hci_resume_dev(struct hci_dev *hdev)
1837{
1838 hci_notify(hdev, HCI_DEV_RESUME);
1839 return 0;
1840}
1841EXPORT_SYMBOL(hci_resume_dev);
1842
1843/* Receive frame from HCI drivers */
1844int hci_recv_frame(struct sk_buff *skb)
1845{
1846 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1847 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1848 && !test_bit(HCI_INIT, &hdev->flags))) {
1849 kfree_skb(skb);
1850 return -ENXIO;
1851 }
1852
1853 /* Incoming skb */
1854 bt_cb(skb)->incoming = 1;
1855
1856 /* Time stamp */
1857 __net_timestamp(skb);
1858
1859 skb_queue_tail(&hdev->rx_q, skb);
1860 queue_work(hdev->workqueue, &hdev->rx_work);
1861
1862 return 0;
1863}
1864EXPORT_SYMBOL(hci_recv_frame);
1865
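/* Reassemble a possibly fragmented HCI packet of the given type into
 * hdev->reassembly[index]; once a complete frame has been gathered it is
 * passed to hci_recv_frame(). Returns the number of input bytes left
 * unconsumed, or a negative error.
 */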
1866static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1867 int count, __u8 index)
1868{
1869 int len = 0;
1870 int hlen = 0;
1871 int remain = count;
1872 struct sk_buff *skb;
1873 struct bt_skb_cb *scb;
1874
1875 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1876 index >= NUM_REASSEMBLY)
1877 return -EILSEQ;
1878
1879 skb = hdev->reassembly[index];
1880
1881 if (!skb) {
1882 switch (type) {
1883 case HCI_ACLDATA_PKT:
1884 len = HCI_MAX_FRAME_SIZE;
1885 hlen = HCI_ACL_HDR_SIZE;
1886 break;
1887 case HCI_EVENT_PKT:
1888 len = HCI_MAX_EVENT_SIZE;
1889 hlen = HCI_EVENT_HDR_SIZE;
1890 break;
1891 case HCI_SCODATA_PKT:
1892 len = HCI_MAX_SCO_SIZE;
1893 hlen = HCI_SCO_HDR_SIZE;
1894 break;
1895 }
1896
1897 skb = bt_skb_alloc(len, GFP_ATOMIC);
1898 if (!skb)
1899 return -ENOMEM;
1900
1901 scb = (void *) skb->cb;
1902 scb->expect = hlen;
1903 scb->pkt_type = type;
1904
1905 skb->dev = (void *) hdev;
1906 hdev->reassembly[index] = skb;
1907 }
1908
1909 while (count) {
1910 scb = (void *) skb->cb;
1911 len = min_t(uint, scb->expect, count);
1912
1913 memcpy(skb_put(skb, len), data, len);
1914
1915 count -= len;
1916 data += len;
1917 scb->expect -= len;
1918 remain = count;
1919
1920 switch (type) {
1921 case HCI_EVENT_PKT:
1922 if (skb->len == HCI_EVENT_HDR_SIZE) {
1923 struct hci_event_hdr *h = hci_event_hdr(skb);
1924 scb->expect = h->plen;
1925
1926 if (skb_tailroom(skb) < scb->expect) {
1927 kfree_skb(skb);
1928 hdev->reassembly[index] = NULL;
1929 return -ENOMEM;
1930 }
1931 }
1932 break;
1933
1934 case HCI_ACLDATA_PKT:
1935 if (skb->len == HCI_ACL_HDR_SIZE) {
1936 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1937 scb->expect = __le16_to_cpu(h->dlen);
1938
1939 if (skb_tailroom(skb) < scb->expect) {
1940 kfree_skb(skb);
1941 hdev->reassembly[index] = NULL;
1942 return -ENOMEM;
1943 }
1944 }
1945 break;
1946
1947 case HCI_SCODATA_PKT:
1948 if (skb->len == HCI_SCO_HDR_SIZE) {
1949 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1950 scb->expect = h->dlen;
1951
1952 if (skb_tailroom(skb) < scb->expect) {
1953 kfree_skb(skb);
1954 hdev->reassembly[index] = NULL;
1955 return -ENOMEM;
1956 }
1957 }
1958 break;
1959 }
1960
1961 if (scb->expect == 0) {
1962 /* Complete frame */
1963
1964 bt_cb(skb)->pkt_type = type;
1965 hci_recv_frame(skb);
1966
1967 hdev->reassembly[index] = NULL;
1968 return remain;
1969 }
1970 }
1971
1972 return remain;
1973}
1974
1975int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1976{
1977 int rem = 0;
1978
1979 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1980 return -EILSEQ;
1981
1982 while (count) {
1983 rem = hci_reassembly(hdev, type, data, count, type - 1);
1984 if (rem < 0)
1985 return rem;
1986
1987 data += (count - rem);
1988 count = rem;
1989 }
1990
1991 return rem;
1992}
1993EXPORT_SYMBOL(hci_recv_fragment);
1994
1995#define STREAM_REASSEMBLY 0
1996
1997int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1998{
1999 int type;
2000 int rem = 0;
2001
2002 while (count) {
2003 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2004
2005 if (!skb) {
2006 struct { char type; } *pkt;
2007
2008 /* Start of the frame */
2009 pkt = data;
2010 type = pkt->type;
2011
2012 data++;
2013 count--;
2014 } else
2015 type = bt_cb(skb)->pkt_type;
2016
2017 rem = hci_reassembly(hdev, type, data, count,
2018 STREAM_REASSEMBLY);
2019 if (rem < 0)
2020 return rem;
2021
2022 data += (count - rem);
2023 count = rem;
2024 }
2025
2026 return rem;
2027}
2028EXPORT_SYMBOL(hci_recv_stream_fragment);
2029
2030/* ---- Interface to upper protocols ---- */
2031
2032int hci_register_cb(struct hci_cb *cb)
2033{
2034 BT_DBG("%p name %s", cb, cb->name);
2035
2036 write_lock(&hci_cb_list_lock);
2037 list_add(&cb->list, &hci_cb_list);
2038 write_unlock(&hci_cb_list_lock);
2039
2040 return 0;
2041}
2042EXPORT_SYMBOL(hci_register_cb);
2043
2044int hci_unregister_cb(struct hci_cb *cb)
2045{
2046 BT_DBG("%p name %s", cb, cb->name);
2047
2048 write_lock(&hci_cb_list_lock);
2049 list_del(&cb->list);
2050 write_unlock(&hci_cb_list_lock);
2051
2052 return 0;
2053}
2054EXPORT_SYMBOL(hci_unregister_cb);
2055
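/* Hand one frame to the driver: timestamp it, mirror a copy to the HCI
 * monitor (and to raw sockets when someone is in promiscuous mode), then
 * call the driver's send() callback.
 */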
2056static int hci_send_frame(struct sk_buff *skb)
2057{
2058 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2059
2060 if (!hdev) {
2061 kfree_skb(skb);
2062 return -ENODEV;
2063 }
2064
2065 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2066
2067 /* Time stamp */
2068 __net_timestamp(skb);
2069
2070 /* Send copy to monitor */
2071 hci_send_to_monitor(hdev, skb);
2072
2073 if (atomic_read(&hdev->promisc)) {
2074 /* Send copy to the sockets */
2075 hci_send_to_sock(hdev, skb);
2076 }
2077
2078 /* Get rid of skb owner, prior to sending to the driver. */
2079 skb_orphan(skb);
2080
2081 return hdev->send(skb);
2082}
2083
2084/* Send HCI command */
2085int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2086{
2087 int len = HCI_COMMAND_HDR_SIZE + plen;
2088 struct hci_command_hdr *hdr;
2089 struct sk_buff *skb;
2090
2091 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2092
2093 skb = bt_skb_alloc(len, GFP_ATOMIC);
2094 if (!skb) {
2095 BT_ERR("%s no memory for command", hdev->name);
2096 return -ENOMEM;
2097 }
2098
2099 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2100 hdr->opcode = cpu_to_le16(opcode);
2101 hdr->plen = plen;
2102
2103 if (plen)
2104 memcpy(skb_put(skb, plen), param, plen);
2105
2106 BT_DBG("skb len %d", skb->len);
2107
2108 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2109 skb->dev = (void *) hdev;
2110
2111 if (test_bit(HCI_INIT, &hdev->flags))
2112 hdev->init_last_cmd = opcode;
2113
2114 skb_queue_tail(&hdev->cmd_q, skb);
2115 queue_work(hdev->workqueue, &hdev->cmd_work);
2116
2117 return 0;
2118}
2119
2120/* Get data from the previously sent command */
2121void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2122{
2123 struct hci_command_hdr *hdr;
2124
2125 if (!hdev->sent_cmd)
2126 return NULL;
2127
2128 hdr = (void *) hdev->sent_cmd->data;
2129
2130 if (hdr->opcode != cpu_to_le16(opcode))
2131 return NULL;
2132
2133 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2134
2135 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2136}
2137
2138/* Send ACL data */
2139static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2140{
2141 struct hci_acl_hdr *hdr;
2142 int len = skb->len;
2143
2144 skb_push(skb, HCI_ACL_HDR_SIZE);
2145 skb_reset_transport_header(skb);
2146 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2147 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2148 hdr->dlen = cpu_to_le16(len);
2149}
2150
2151static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2152 struct sk_buff *skb, __u16 flags)
2153{
2154 struct hci_dev *hdev = conn->hdev;
2155 struct sk_buff *list;
2156
2157 skb->len = skb_headlen(skb);
2158 skb->data_len = 0;
2159
2160 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2161 hci_add_acl_hdr(skb, conn->handle, flags);
2162
2163 list = skb_shinfo(skb)->frag_list;
2164 if (!list) {
2165 /* Non fragmented */
2166 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2167
2168 skb_queue_tail(queue, skb);
2169 } else {
2170 /* Fragmented */
2171 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2172
2173 skb_shinfo(skb)->frag_list = NULL;
2174
2175 /* Queue all fragments atomically */
2176 spin_lock(&queue->lock);
2177
2178 __skb_queue_tail(queue, skb);
2179
2180 flags &= ~ACL_START;
2181 flags |= ACL_CONT;
2182 do {
2183 skb = list; list = list->next;
2184
2185 skb->dev = (void *) hdev;
2186 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2187 hci_add_acl_hdr(skb, conn->handle, flags);
2188
2189 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2190
2191 __skb_queue_tail(queue, skb);
2192 } while (list);
2193
2194 spin_unlock(&queue->lock);
2195 }
2196}
2197
2198void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2199{
2200 struct hci_conn *conn = chan->conn;
2201 struct hci_dev *hdev = conn->hdev;
2202
2203 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2204
2205 skb->dev = (void *) hdev;
2206
2207 hci_queue_acl(conn, &chan->data_q, skb, flags);
2208
2209 queue_work(hdev->workqueue, &hdev->tx_work);
2210}
2211
2212/* Send SCO data */
2213void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2214{
2215 struct hci_dev *hdev = conn->hdev;
2216 struct hci_sco_hdr hdr;
2217
2218 BT_DBG("%s len %d", hdev->name, skb->len);
2219
2220 hdr.handle = cpu_to_le16(conn->handle);
2221 hdr.dlen = skb->len;
2222
2223 skb_push(skb, HCI_SCO_HDR_SIZE);
2224 skb_reset_transport_header(skb);
2225 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2226
2227 skb->dev = (void *) hdev;
2228 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2229
2230 skb_queue_tail(&conn->data_q, skb);
2231 queue_work(hdev->workqueue, &hdev->tx_work);
2232}
2233
2234/* ---- HCI TX task (outgoing data) ---- */
2235
2236/* HCI Connection scheduler */
2237static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2238 int *quote)
2239{
2240 struct hci_conn_hash *h = &hdev->conn_hash;
2241 struct hci_conn *conn = NULL, *c;
2242 unsigned int num = 0, min = ~0;
2243
2244 /* We don't have to lock the device here. Connections are always
2245 * added and removed with TX task disabled. */
2246
2247 rcu_read_lock();
2248
2249 list_for_each_entry_rcu(c, &h->list, list) {
2250 if (c->type != type || skb_queue_empty(&c->data_q))
2251 continue;
2252
2253 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2254 continue;
2255
2256 num++;
2257
2258 if (c->sent < min) {
2259 min = c->sent;
2260 conn = c;
2261 }
2262
2263 if (hci_conn_num(hdev, type) == num)
2264 break;
2265 }
2266
2267 rcu_read_unlock();
2268
2269 if (conn) {
2270 int cnt, q;
2271
2272 switch (conn->type) {
2273 case ACL_LINK:
2274 cnt = hdev->acl_cnt;
2275 break;
2276 case SCO_LINK:
2277 case ESCO_LINK:
2278 cnt = hdev->sco_cnt;
2279 break;
2280 case LE_LINK:
2281 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2282 break;
2283 default:
2284 cnt = 0;
2285 BT_ERR("Unknown link type");
2286 }
2287
2288 q = cnt / num;
2289 *quote = q ? q : 1;
2290 } else
2291 *quote = 0;
2292
2293 BT_DBG("conn %p quote %d", conn, *quote);
2294 return conn;
2295}
2296
2297static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2298{
2299 struct hci_conn_hash *h = &hdev->conn_hash;
2300 struct hci_conn *c;
2301
2302 BT_ERR("%s link tx timeout", hdev->name);
2303
2304 rcu_read_lock();
2305
2306 /* Kill stalled connections */
2307 list_for_each_entry_rcu(c, &h->list, list) {
2308 if (c->type == type && c->sent) {
2309 BT_ERR("%s killing stalled connection %s",
2310 hdev->name, batostr(&c->dst));
2311 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2312 }
2313 }
2314
2315 rcu_read_unlock();
2316}
2317
2318static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2319 int *quote)
2320{
2321 struct hci_conn_hash *h = &hdev->conn_hash;
2322 struct hci_chan *chan = NULL;
2323 unsigned int num = 0, min = ~0, cur_prio = 0;
2324 struct hci_conn *conn;
2325 int cnt, q, conn_num = 0;
2326
2327 BT_DBG("%s", hdev->name);
2328
2329 rcu_read_lock();
2330
2331 list_for_each_entry_rcu(conn, &h->list, list) {
2332 struct hci_chan *tmp;
2333
2334 if (conn->type != type)
2335 continue;
2336
2337 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2338 continue;
2339
2340 conn_num++;
2341
2342 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2343 struct sk_buff *skb;
2344
2345 if (skb_queue_empty(&tmp->data_q))
2346 continue;
2347
2348 skb = skb_peek(&tmp->data_q);
2349 if (skb->priority < cur_prio)
2350 continue;
2351
2352 if (skb->priority > cur_prio) {
2353 num = 0;
2354 min = ~0;
2355 cur_prio = skb->priority;
2356 }
2357
2358 num++;
2359
2360 if (conn->sent < min) {
2361 min = conn->sent;
2362 chan = tmp;
2363 }
2364 }
2365
2366 if (hci_conn_num(hdev, type) == conn_num)
2367 break;
2368 }
2369
2370 rcu_read_unlock();
2371
2372 if (!chan)
2373 return NULL;
2374
2375 switch (chan->conn->type) {
2376 case ACL_LINK:
2377 cnt = hdev->acl_cnt;
2378 break;
2379 case SCO_LINK:
2380 case ESCO_LINK:
2381 cnt = hdev->sco_cnt;
2382 break;
2383 case LE_LINK:
2384 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2385 break;
2386 default:
2387 cnt = 0;
2388 BT_ERR("Unknown link type");
2389 }
2390
2391 q = cnt / num;
2392 *quote = q ? q : 1;
2393 BT_DBG("chan %p quote %d", chan, *quote);
2394 return chan;
2395}
2396
2397static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2398{
2399 struct hci_conn_hash *h = &hdev->conn_hash;
2400 struct hci_conn *conn;
2401 int num = 0;
2402
2403 BT_DBG("%s", hdev->name);
2404
2405 rcu_read_lock();
2406
2407 list_for_each_entry_rcu(conn, &h->list, list) {
2408 struct hci_chan *chan;
2409
2410 if (conn->type != type)
2411 continue;
2412
2413 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2414 continue;
2415
2416 num++;
2417
2418 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2419 struct sk_buff *skb;
2420
2421 if (chan->sent) {
2422 chan->sent = 0;
2423 continue;
2424 }
2425
2426 if (skb_queue_empty(&chan->data_q))
2427 continue;
2428
2429 skb = skb_peek(&chan->data_q);
2430 if (skb->priority >= HCI_PRIO_MAX - 1)
2431 continue;
2432
2433 skb->priority = HCI_PRIO_MAX - 1;
2434
2435 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2436 skb->priority);
2437 }
2438
2439 if (hci_conn_num(hdev, type) == num)
2440 break;
2441 }
2442
2443 rcu_read_unlock();
2444
2445}
2446
2447static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2448{
2449 /* Calculate count of blocks used by this packet */
2450 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2451}
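
Block-based flow control charges a frame in whole data blocks rather than packets, so the ACL header is subtracted and the payload is rounded up to the controller's block size. The same arithmetic standalone; the 27-octet block size in the comment is only an example value:

/* DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d). With a hypothetical
 * 27-octet block size, a 60-byte frame carrying a 4-byte ACL header
 * costs (60 - 4 + 26) / 27 = 3 blocks. */
static unsigned int acl_blocks(unsigned int frame_len,
			       unsigned int hdr_len,
			       unsigned int block_len)
{
	return (frame_len - hdr_len + block_len - 1) / block_len;
}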
2452
2453static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2454{
2455 if (!test_bit(HCI_RAW, &hdev->flags)) {
2456 /* ACL tx timeout must be longer than maximum
2457 * link supervision timeout (40.9 seconds) */
2458 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2459 HCI_ACL_TX_TIMEOUT))
2460 hci_link_tx_to(hdev, ACL_LINK);
2461 }
2462}
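
The 40.9-second figure in the comment is the largest link supervision timeout a controller can be configured with: 0xFFFF baseband slots of 0.625 ms each. The TX watchdog (HCI_ACL_TX_TIMEOUT here, and HZ * 45 in hci_sched_le() below) therefore has to sit above that value. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	/* 0xFFFF slots * 0.625 ms per slot = 40959.375 ms, roughly 40.96 s */
	double max_supervision_ms = 0xFFFF * 0.625;

	printf("max link supervision timeout: %.3f s\n",
	       max_supervision_ms / 1000.0);
	return 0;
}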
2463
2464static void hci_sched_acl_pkt(struct hci_dev *hdev)
2465{
2466 unsigned int cnt = hdev->acl_cnt;
2467 struct hci_chan *chan;
2468 struct sk_buff *skb;
2469 int quote;
2470
2471 __check_timeout(hdev, cnt);
2472
2473 while (hdev->acl_cnt &&
2474 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2475 u32 priority = (skb_peek(&chan->data_q))->priority;
2476 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2477 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2478 skb->len, skb->priority);
2479
2480 /* Stop if priority has changed */
2481 if (skb->priority < priority)
2482 break;
2483
2484 skb = skb_dequeue(&chan->data_q);
2485
2486 hci_conn_enter_active_mode(chan->conn,
2487 bt_cb(skb)->force_active);
2488
2489 hci_send_frame(skb);
2490 hdev->acl_last_tx = jiffies;
2491
2492 hdev->acl_cnt--;
2493 chan->sent++;
2494 chan->conn->sent++;
2495 }
2496 }
2497
2498 if (cnt != hdev->acl_cnt)
2499 hci_prio_recalculate(hdev, ACL_LINK);
2500}
2501
2502static void hci_sched_acl_blk(struct hci_dev *hdev)
2503{
2504 unsigned int cnt = hdev->block_cnt;
2505 struct hci_chan *chan;
2506 struct sk_buff *skb;
2507 int quote;
2508
2509 __check_timeout(hdev, cnt);
2510
2511 while (hdev->block_cnt > 0 &&
2512 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2513 u32 priority = (skb_peek(&chan->data_q))->priority;
2514 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2515 int blocks;
2516
2517 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2518 skb->len, skb->priority);
2519
2520 /* Stop if priority has changed */
2521 if (skb->priority < priority)
2522 break;
2523
2524 skb = skb_dequeue(&chan->data_q);
2525
2526 blocks = __get_blocks(hdev, skb);
2527 if (blocks > hdev->block_cnt)
2528 return;
2529
2530 hci_conn_enter_active_mode(chan->conn,
2531 bt_cb(skb)->force_active);
2532
2533 hci_send_frame(skb);
2534 hdev->acl_last_tx = jiffies;
2535
2536 hdev->block_cnt -= blocks;
2537 quote -= blocks;
2538
2539 chan->sent += blocks;
2540 chan->conn->sent += blocks;
2541 }
2542 }
2543
2544 if (cnt != hdev->block_cnt)
2545 hci_prio_recalculate(hdev, ACL_LINK);
2546}
2547
2548static void hci_sched_acl(struct hci_dev *hdev)
2549{
2550 BT_DBG("%s", hdev->name);
2551
2552 if (!hci_conn_num(hdev, ACL_LINK))
2553 return;
2554
2555 switch (hdev->flow_ctl_mode) {
2556 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2557 hci_sched_acl_pkt(hdev);
2558 break;
2559
2560 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2561 hci_sched_acl_blk(hdev);
2562 break;
2563 }
2564}
2565
2566/* Schedule SCO */
2567static void hci_sched_sco(struct hci_dev *hdev)
2568{
2569 struct hci_conn *conn;
2570 struct sk_buff *skb;
2571 int quote;
2572
2573 BT_DBG("%s", hdev->name);
2574
2575 if (!hci_conn_num(hdev, SCO_LINK))
2576 return;
2577
2578 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2579 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2580 BT_DBG("skb %p len %d", skb, skb->len);
2581 hci_send_frame(skb);
2582
2583 conn->sent++;
2584 if (conn->sent == ~0)
2585 conn->sent = 0;
2586 }
2587 }
2588}
2589
2590static void hci_sched_esco(struct hci_dev *hdev)
2591{
2592 struct hci_conn *conn;
2593 struct sk_buff *skb;
2594 int quote;
2595
2596 BT_DBG("%s", hdev->name);
2597
2598 if (!hci_conn_num(hdev, ESCO_LINK))
2599 return;
2600
2601 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2602 &quote))) {
2603 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2604 BT_DBG("skb %p len %d", skb, skb->len);
2605 hci_send_frame(skb);
2606
2607 conn->sent++;
2608 if (conn->sent == ~0)
2609 conn->sent = 0;
2610 }
2611 }
2612}
2613
2614static void hci_sched_le(struct hci_dev *hdev)
2615{
2616 struct hci_chan *chan;
2617 struct sk_buff *skb;
2618 int quote, cnt, tmp;
2619
2620 BT_DBG("%s", hdev->name);
2621
2622 if (!hci_conn_num(hdev, LE_LINK))
2623 return;
2624
2625 if (!test_bit(HCI_RAW, &hdev->flags)) {
2626 /* LE tx timeout must be longer than maximum
2627 * link supervision timeout (40.9 seconds) */
2628 if (!hdev->le_cnt && hdev->le_pkts &&
2629 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2630 hci_link_tx_to(hdev, LE_LINK);
2631 }
2632
2633 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2634 tmp = cnt;
2635 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2636 u32 priority = (skb_peek(&chan->data_q))->priority;
2637 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2638 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2639 skb->len, skb->priority);
2640
2641 /* Stop if priority has changed */
2642 if (skb->priority < priority)
2643 break;
2644
2645 skb = skb_dequeue(&chan->data_q);
2646
2647 hci_send_frame(skb);
2648 hdev->le_last_tx = jiffies;
2649
2650 cnt--;
2651 chan->sent++;
2652 chan->conn->sent++;
2653 }
2654 }
2655
2656 if (hdev->le_pkts)
2657 hdev->le_cnt = cnt;
2658 else
2659 hdev->acl_cnt = cnt;
2660
2661 if (cnt != tmp)
2662 hci_prio_recalculate(hdev, LE_LINK);
2663}
2664
2665static void hci_tx_work(struct work_struct *work)
2666{
2667 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2668 struct sk_buff *skb;
2669
2670 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2671 hdev->sco_cnt, hdev->le_cnt);
2672
2673 /* Schedule queues and send pending data to the HCI driver */
2674
2675 hci_sched_acl(hdev);
2676
2677 hci_sched_sco(hdev);
2678
2679 hci_sched_esco(hdev);
2680
2681 hci_sched_le(hdev);
2682
2683 /* Send next queued raw (unknown type) packet */
2684 while ((skb = skb_dequeue(&hdev->raw_q)))
2685 hci_send_frame(skb);
2686}
2687
2688/* ---- HCI RX task (incoming data processing) ---- */
2689
2690/* ACL data packet */
2691static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2692{
2693 struct hci_acl_hdr *hdr = (void *) skb->data;
2694 struct hci_conn *conn;
2695 __u16 handle, flags;
2696
2697 skb_pull(skb, HCI_ACL_HDR_SIZE);
2698
2699 handle = __le16_to_cpu(hdr->handle);
2700 flags = hci_flags(handle);
2701 handle = hci_handle(handle);
2702
2703 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2704 handle, flags);
2705
2706 hdev->stat.acl_rx++;
2707
2708 hci_dev_lock(hdev);
2709 conn = hci_conn_hash_lookup_handle(hdev, handle);
2710 hci_dev_unlock(hdev);
2711
2712 if (conn) {
2713 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2714
2715 hci_dev_lock(hdev);
2716 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2717 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2718 mgmt_device_connected(hdev, &conn->dst, conn->type,
2719 conn->dst_type, 0, NULL, 0,
2720 conn->dev_class);
2721 hci_dev_unlock(hdev);
2722
2723 /* Send to upper protocol */
2724 l2cap_recv_acldata(conn, skb, flags);
2725 return;
2726 } else {
2727 BT_ERR("%s ACL packet for unknown connection handle %d",
2728 hdev->name, handle);
2729 }
2730
2731 kfree_skb(skb);
2732}
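
The 16-bit handle field of the ACL header packs the 12-bit connection handle into its low bits and the packet-boundary/broadcast flags into the top four; hci_handle() and hci_flags() just mask and shift. A sketch of that decode, assumed to match the helpers in hci.h (for instance, a raw value of 0x2042 splits into handle 0x042 with flags 0x2):

#include <stdint.h>

static inline uint16_t acl_handle(uint16_t h)
{
	return h & 0x0fff;	/* low 12 bits: connection handle */
}

static inline uint16_t acl_flags(uint16_t h)
{
	return h >> 12;		/* top 4 bits: boundary and broadcast flags */
}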
2733
2734/* SCO data packet */
2735static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2736{
2737 struct hci_sco_hdr *hdr = (void *) skb->data;
2738 struct hci_conn *conn;
2739 __u16 handle;
2740
2741 skb_pull(skb, HCI_SCO_HDR_SIZE);
2742
2743 handle = __le16_to_cpu(hdr->handle);
2744
2745 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2746
2747 hdev->stat.sco_rx++;
2748
2749 hci_dev_lock(hdev);
2750 conn = hci_conn_hash_lookup_handle(hdev, handle);
2751 hci_dev_unlock(hdev);
2752
2753 if (conn) {
2754 /* Send to upper protocol */
2755 sco_recv_scodata(conn, skb);
2756 return;
2757 } else {
2758 BT_ERR("%s SCO packet for unknown connection handle %d",
2759 hdev->name, handle);
2760 }
2761
2762 kfree_skb(skb);
2763}
2764
2765static void hci_rx_work(struct work_struct *work)
2766{
2767 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2768 struct sk_buff *skb;
2769
2770 BT_DBG("%s", hdev->name);
2771
2772 while ((skb = skb_dequeue(&hdev->rx_q))) {
2773 /* Send copy to monitor */
2774 hci_send_to_monitor(hdev, skb);
2775
2776 if (atomic_read(&hdev->promisc)) {
2777 /* Send copy to the sockets */
2778 hci_send_to_sock(hdev, skb);
2779 }
2780
2781 if (test_bit(HCI_RAW, &hdev->flags)) {
2782 kfree_skb(skb);
2783 continue;
2784 }
2785
2786 if (test_bit(HCI_INIT, &hdev->flags)) {
2787 /* Don't process data packets in this state. */
2788 switch (bt_cb(skb)->pkt_type) {
2789 case HCI_ACLDATA_PKT:
2790 case HCI_SCODATA_PKT:
2791 kfree_skb(skb);
2792 continue;
2793 }
2794 }
2795
2796 /* Process frame */
2797 switch (bt_cb(skb)->pkt_type) {
2798 case HCI_EVENT_PKT:
2799 BT_DBG("%s Event packet", hdev->name);
2800 hci_event_packet(hdev, skb);
2801 break;
2802
2803 case HCI_ACLDATA_PKT:
2804 BT_DBG("%s ACL data packet", hdev->name);
2805 hci_acldata_packet(hdev, skb);
2806 break;
2807
2808 case HCI_SCODATA_PKT:
2809 BT_DBG("%s SCO data packet", hdev->name);
2810 hci_scodata_packet(hdev, skb);
2811 break;
2812
2813 default:
2814 kfree_skb(skb);
2815 break;
2816 }
2817 }
2818}
2819
2820static void hci_cmd_work(struct work_struct *work)
2821{
2822 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2823 struct sk_buff *skb;
2824
2825 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2826 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2827
2828 /* Send queued commands */
2829 if (atomic_read(&hdev->cmd_cnt)) {
2830 skb = skb_dequeue(&hdev->cmd_q);
2831 if (!skb)
2832 return;
2833
2834 kfree_skb(hdev->sent_cmd);
2835
2836 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2837 if (hdev->sent_cmd) {
2838 atomic_dec(&hdev->cmd_cnt);
2839 hci_send_frame(skb);
2840 if (test_bit(HCI_RESET, &hdev->flags))
2841 del_timer(&hdev->cmd_timer);
2842 else
2843 mod_timer(&hdev->cmd_timer,
2844 jiffies + HCI_CMD_TIMEOUT);
2845 } else {
2846 skb_queue_head(&hdev->cmd_q, skb);
2847 queue_work(hdev->workqueue, &hdev->cmd_work);
2848 }
2849 }
2850}
2851
2852int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2853{
2854 /* General inquiry access code (GIAC) */
2855 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2856 struct hci_cp_inquiry cp;
2857
2858 BT_DBG("%s", hdev->name);
2859
2860 if (test_bit(HCI_INQUIRY, &hdev->flags))
2861 return -EINPROGRESS;
2862
2863 inquiry_cache_flush(hdev);
2864
2865 memset(&cp, 0, sizeof(cp));
2866 memcpy(&cp.lap, lap, sizeof(cp.lap));
2867 cp.length = length;
2868
2869 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2870}
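
The lap[] initializer is the General Inquiry Access Code 0x9E8B33 serialized least-significant byte first, which is how the Inquiry command parameter is sent on the wire. Shown explicitly:

#include <stdint.h>

/* GIAC 0x9E8B33, little-endian: { 0x33, 0x8b, 0x9e } as in lap[] above. */
static void lap_to_bytes(uint32_t lap, uint8_t out[3])
{
	out[0] = lap & 0xff;		/* 0x33 */
	out[1] = (lap >> 8) & 0xff;	/* 0x8b */
	out[2] = (lap >> 16) & 0xff;	/* 0x9e */
}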
2871
2872int hci_cancel_inquiry(struct hci_dev *hdev)
2873{
2874 BT_DBG("%s", hdev->name);
2875
2876 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2877 return -EALREADY;
2878
2879 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2880}
2881
2882u8 bdaddr_to_le(u8 bdaddr_type)
2883{
2884 switch (bdaddr_type) {
2885 case BDADDR_LE_PUBLIC:
2886 return ADDR_LE_DEV_PUBLIC;
2887
2888 default:
2889 /* Fall back to the LE Random address type */
2890 return ADDR_LE_DEV_RANDOM;
2891 }
2892}