Bluetooth: Add discovery state tracking
net/bluetooth/hci_core.c

1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/jiffies.h>
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
44#include <linux/rfkill.h>
45#include <linux/timer.h>
46#include <linux/crypto.h>
47#include <net/sock.h>
48
49#include <asm/system.h>
50#include <linux/uaccess.h>
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
56#define AUTO_OFF_TIMEOUT 2000
57
58int enable_hs;
59
60static void hci_rx_work(struct work_struct *work);
61static void hci_cmd_work(struct work_struct *work);
62static void hci_tx_work(struct work_struct *work);
63
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
72/* HCI notifiers list */
73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85}
86
87static void hci_notify(struct hci_dev *hdev, int event)
88{
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
90}
91
92/* ---- HCI requests ---- */
93
94void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95{
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98 /* If this is the init phase, check whether the completed command
99 * matches the last init command; if not, just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 return;
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
146 err = -bt_to_errno(hdev->req_result);
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
156 }
157
158 hdev->req_status = hdev->req_result = 0;
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
167{
168 int ret;
169
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
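
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * hand in a request callback such as hci_auth_req below and sleep until
 * hci_req_complete() wakes them or the timeout fires ("enable" is a
 * hypothetical __u8 option value here).
 *
 *	err = hci_request(hdev, hci_auth_req, enable,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *	if (err < 0)
 *		return err;	// -ENETDOWN, -EINTR, -ETIMEDOUT, or a
 *				// controller status via bt_to_errno()
 */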
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188}
189
190static void bredr_init(struct hci_dev *hdev)
191{
192 struct hci_cp_delete_stored_link_key cp;
193 __le16 param;
194 __u8 flt_type;
195
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
198 /* Mandatory initialization */
199
200 /* Reset */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
204 }
205
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
208
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
214
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
223
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
232
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
240}
241
242static void amp_init(struct hci_dev *hdev)
243{
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
285}
286
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
321 /* Encryption */
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
323}
324
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
329 BT_DBG("%s %x", hdev->name, policy);
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
335/* Get HCI device by index.
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
339 struct hci_dev *hdev = NULL, *d;
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
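
/*
 * Usage sketch: every successful hci_dev_get() must be balanced with a
 * hci_dev_put() once the reference is no longer needed, as hci_inquiry()
 * below does.
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */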
356
357/* ---- Inquiry support ---- */
358
359void hci_discovery_set_state(struct hci_dev *hdev, int state)
360{
361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
362
363 if (hdev->discovery.state == state)
364 return;
365
366 switch (state) {
367 case DISCOVERY_STOPPED:
368 mgmt_discovering(hdev, 0);
369 break;
370 case DISCOVERY_STARTING:
371 break;
372 case DISCOVERY_ACTIVE:
373 mgmt_discovering(hdev, 1);
374 break;
375 case DISCOVERY_STOPPING:
376 break;
377 }
378
379 hdev->discovery.state = state;
380}
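
/*
 * Sketch of the intended flow (the subject of this commit):
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_ACTIVE
 *		-> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * Only the transitions into DISCOVERY_ACTIVE and DISCOVERY_STOPPED emit
 * mgmt_discovering() events, so userspace sees one clean start/stop pair
 * per discovery session.
 */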
381
382static void inquiry_cache_flush(struct hci_dev *hdev)
383{
384 struct discovery_state *cache = &hdev->discovery;
385 struct inquiry_entry *p, *n;
386
387 list_for_each_entry_safe(p, n, &cache->all, all) {
388 list_del(&p->all);
389 kfree(p);
390 }
391
392 INIT_LIST_HEAD(&cache->unknown);
393 INIT_LIST_HEAD(&cache->resolve);
394 cache->state = DISCOVERY_STOPPED;
395}
396
397struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
398{
399 struct discovery_state *cache = &hdev->discovery;
400 struct inquiry_entry *e;
401
402 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
403
404 list_for_each_entry(e, &cache->all, all) {
405 if (!bacmp(&e->data.bdaddr, bdaddr))
406 return e;
407 }
408
409 return NULL;
410}
411
412struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
413 bdaddr_t *bdaddr)
414{
415 struct discovery_state *cache = &hdev->discovery;
416 struct inquiry_entry *e;
417
418 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
419
420 list_for_each_entry(e, &cache->unknown, list) {
421 if (!bacmp(&e->data.bdaddr, bdaddr))
422 return e;
423 }
424
425 return NULL;
426}
427
428bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
429 bool name_known)
430{
431 struct discovery_state *cache = &hdev->discovery;
432 struct inquiry_entry *ie;
433
434 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
435
436 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
437 if (ie)
438 goto update;
439
440 /* Entry not in the cache. Add new one. */
441 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
442 if (!ie)
443 return false;
444
445 list_add(&ie->all, &cache->all);
446
447 if (name_known) {
448 ie->name_state = NAME_KNOWN;
449 } else {
450 ie->name_state = NAME_NOT_KNOWN;
451 list_add(&ie->list, &cache->unknown);
452 }
453
454update:
455 if (name_known && ie->name_state != NAME_KNOWN &&
456 ie->name_state != NAME_PENDING) {
457 ie->name_state = NAME_KNOWN;
458 list_del(&ie->list);
459 }
460
461 memcpy(&ie->data, data, sizeof(*data));
462 ie->timestamp = jiffies;
463 cache->timestamp = jiffies;
464
465 if (ie->name_state == NAME_NOT_KNOWN)
466 return false;
467
468 return true;
469}
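
/*
 * Return semantics (sketch): true means the cached name is already known
 * or being resolved, false (NAME_NOT_KNOWN) tells the caller that a
 * Remote Name Request may still be worthwhile. A NAME_PENDING entry is
 * deliberately left untouched so an in-flight request isn't clobbered by
 * a fresh inquiry result.
 */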
470
471static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
472{
473 struct discovery_state *cache = &hdev->discovery;
474 struct inquiry_info *info = (struct inquiry_info *) buf;
475 struct inquiry_entry *e;
476 int copied = 0;
477
478 list_for_each_entry(e, &cache->all, all) {
479 struct inquiry_data *data = &e->data;
480
481 if (copied >= num)
482 break;
483
484 bacpy(&info->bdaddr, &data->bdaddr);
485 info->pscan_rep_mode = data->pscan_rep_mode;
486 info->pscan_period_mode = data->pscan_period_mode;
487 info->pscan_mode = data->pscan_mode;
488 memcpy(info->dev_class, data->dev_class, 3);
489 info->clock_offset = data->clock_offset;
490
491 info++;
492 copied++;
493 }
494
495 BT_DBG("cache %p, copied %d", cache, copied);
496 return copied;
497}
498
499static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
500{
501 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
502 struct hci_cp_inquiry cp;
503
504 BT_DBG("%s", hdev->name);
505
506 if (test_bit(HCI_INQUIRY, &hdev->flags))
507 return;
508
509 /* Start Inquiry */
510 memcpy(&cp.lap, &ir->lap, 3);
511 cp.length = ir->length;
512 cp.num_rsp = ir->num_rsp;
513 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
514}
515
516int hci_inquiry(void __user *arg)
517{
518 __u8 __user *ptr = arg;
519 struct hci_inquiry_req ir;
520 struct hci_dev *hdev;
521 int err = 0, do_inquiry = 0, max_rsp;
522 long timeo;
523 __u8 *buf;
524
525 if (copy_from_user(&ir, ptr, sizeof(ir)))
526 return -EFAULT;
527
528 hdev = hci_dev_get(ir.dev_id);
529 if (!hdev)
530 return -ENODEV;
531
532 hci_dev_lock(hdev);
533 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
534 inquiry_cache_empty(hdev) ||
535 ir.flags & IREQ_CACHE_FLUSH) {
536 inquiry_cache_flush(hdev);
537 do_inquiry = 1;
538 }
539 hci_dev_unlock(hdev);
540
541 timeo = ir.length * msecs_to_jiffies(2000);
542
543 if (do_inquiry) {
544 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
545 if (err < 0)
546 goto done;
547 }
548
549 /* for unlimited number of responses we will use buffer with 255 entries */
550 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
551
552 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
553 * copy it to user space.
554 */
555 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
556 if (!buf) {
557 err = -ENOMEM;
558 goto done;
559 }
560
561 hci_dev_lock(hdev);
562 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
563 hci_dev_unlock(hdev);
564
565 BT_DBG("num_rsp %d", ir.num_rsp);
566
567 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
568 ptr += sizeof(ir);
569 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
570 ir.num_rsp))
571 err = -EFAULT;
572 } else
573 err = -EFAULT;
574
575 kfree(buf);
576
577done:
578 hci_dev_put(hdev);
579 return err;
580}
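
/*
 * Userspace sketch (illustrative; dd is a raw HCI socket fd): the buffer
 * handed to the BlueZ HCIINQUIRY ioctl is a hci_inquiry_req header
 * followed by room for num_rsp inquiry_info records, matching the
 * copy_to_user() layout above.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *			.length  = 8,				// x 1.28s
 *			.num_rsp = 8,
 *		},
 *	};
 *	ioctl(dd, HCIINQUIRY, &buf);
 */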
581
582/* ---- HCI ioctl helpers ---- */
583
584int hci_dev_open(__u16 dev)
585{
586 struct hci_dev *hdev;
587 int ret = 0;
588
589 hdev = hci_dev_get(dev);
590 if (!hdev)
591 return -ENODEV;
592
593 BT_DBG("%s %p", hdev->name, hdev);
594
595 hci_req_lock(hdev);
596
597 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
598 ret = -ERFKILL;
599 goto done;
600 }
601
602 if (test_bit(HCI_UP, &hdev->flags)) {
603 ret = -EALREADY;
604 goto done;
605 }
606
607 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
608 set_bit(HCI_RAW, &hdev->flags);
609
610 /* Treat all non-BR/EDR controllers as raw devices if
611 * enable_hs is not set */
612 if (hdev->dev_type != HCI_BREDR && !enable_hs)
613 set_bit(HCI_RAW, &hdev->flags);
614
615 if (hdev->open(hdev)) {
616 ret = -EIO;
617 goto done;
618 }
619
620 if (!test_bit(HCI_RAW, &hdev->flags)) {
621 atomic_set(&hdev->cmd_cnt, 1);
622 set_bit(HCI_INIT, &hdev->flags);
623 hdev->init_last_cmd = 0;
624
625 ret = __hci_request(hdev, hci_init_req, 0,
626 msecs_to_jiffies(HCI_INIT_TIMEOUT));
627
628 if (lmp_host_le_capable(hdev))
629 ret = __hci_request(hdev, hci_le_init_req, 0,
630 msecs_to_jiffies(HCI_INIT_TIMEOUT));
631
632 clear_bit(HCI_INIT, &hdev->flags);
633 }
634
635 if (!ret) {
636 hci_dev_hold(hdev);
637 set_bit(HCI_UP, &hdev->flags);
638 hci_notify(hdev, HCI_DEV_UP);
639 if (!test_bit(HCI_SETUP, &hdev->flags)) {
640 hci_dev_lock(hdev);
641 mgmt_powered(hdev, 1);
642 hci_dev_unlock(hdev);
643 }
644 } else {
645 /* Init failed, cleanup */
646 flush_work(&hdev->tx_work);
647 flush_work(&hdev->cmd_work);
648 flush_work(&hdev->rx_work);
649
650 skb_queue_purge(&hdev->cmd_q);
651 skb_queue_purge(&hdev->rx_q);
652
653 if (hdev->flush)
654 hdev->flush(hdev);
655
656 if (hdev->sent_cmd) {
657 kfree_skb(hdev->sent_cmd);
658 hdev->sent_cmd = NULL;
659 }
660
661 hdev->close(hdev);
662 hdev->flags = 0;
663 }
664
665done:
666 hci_req_unlock(hdev);
667 hci_dev_put(hdev);
668 return ret;
669}
670
671static int hci_dev_do_close(struct hci_dev *hdev)
672{
673 BT_DBG("%s %p", hdev->name, hdev);
674
675 hci_req_cancel(hdev, ENODEV);
676 hci_req_lock(hdev);
677
678 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
679 del_timer_sync(&hdev->cmd_timer);
680 hci_req_unlock(hdev);
681 return 0;
682 }
683
684 /* Flush RX and TX works */
685 flush_work(&hdev->tx_work);
686 flush_work(&hdev->rx_work);
687
688 if (hdev->discov_timeout > 0) {
689 cancel_delayed_work(&hdev->discov_off);
690 hdev->discov_timeout = 0;
691 }
692
693 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
694 cancel_delayed_work(&hdev->power_off);
695
696 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
697 cancel_delayed_work(&hdev->service_cache);
698
699 hci_dev_lock(hdev);
700 inquiry_cache_flush(hdev);
701 hci_conn_hash_flush(hdev);
702 hci_dev_unlock(hdev);
703
704 hci_notify(hdev, HCI_DEV_DOWN);
705
706 if (hdev->flush)
707 hdev->flush(hdev);
708
709 /* Reset device */
710 skb_queue_purge(&hdev->cmd_q);
711 atomic_set(&hdev->cmd_cnt, 1);
712 if (!test_bit(HCI_RAW, &hdev->flags)) {
713 set_bit(HCI_INIT, &hdev->flags);
714 __hci_request(hdev, hci_reset_req, 0,
715 msecs_to_jiffies(250));
716 clear_bit(HCI_INIT, &hdev->flags);
717 }
718
719 /* flush cmd work */
720 flush_work(&hdev->cmd_work);
721
722 /* Drop queues */
723 skb_queue_purge(&hdev->rx_q);
724 skb_queue_purge(&hdev->cmd_q);
725 skb_queue_purge(&hdev->raw_q);
726
727 /* Drop last sent command */
728 if (hdev->sent_cmd) {
729 del_timer_sync(&hdev->cmd_timer);
730 kfree_skb(hdev->sent_cmd);
731 hdev->sent_cmd = NULL;
732 }
733
734 /* After this point our queues are empty
735 * and no tasks are scheduled. */
736 hdev->close(hdev);
737
738 hci_dev_lock(hdev);
739 mgmt_powered(hdev, 0);
740 hci_dev_unlock(hdev);
741
742 /* Clear flags */
743 hdev->flags = 0;
744
745 hci_req_unlock(hdev);
746
747 hci_dev_put(hdev);
748 return 0;
749}
750
751int hci_dev_close(__u16 dev)
752{
753 struct hci_dev *hdev;
754 int err;
755
756 hdev = hci_dev_get(dev);
757 if (!hdev)
758 return -ENODEV;
759 err = hci_dev_do_close(hdev);
760 hci_dev_put(hdev);
761 return err;
762}
763
764int hci_dev_reset(__u16 dev)
765{
766 struct hci_dev *hdev;
767 int ret = 0;
768
769 hdev = hci_dev_get(dev);
770 if (!hdev)
771 return -ENODEV;
772
773 hci_req_lock(hdev);
774
775 if (!test_bit(HCI_UP, &hdev->flags))
776 goto done;
777
778 /* Drop queues */
779 skb_queue_purge(&hdev->rx_q);
780 skb_queue_purge(&hdev->cmd_q);
781
782 hci_dev_lock(hdev);
783 inquiry_cache_flush(hdev);
784 hci_conn_hash_flush(hdev);
785 hci_dev_unlock(hdev);
786
787 if (hdev->flush)
788 hdev->flush(hdev);
789
790 atomic_set(&hdev->cmd_cnt, 1);
791 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
792
793 if (!test_bit(HCI_RAW, &hdev->flags))
794 ret = __hci_request(hdev, hci_reset_req, 0,
795 msecs_to_jiffies(HCI_INIT_TIMEOUT));
796
797done:
798 hci_req_unlock(hdev);
799 hci_dev_put(hdev);
800 return ret;
801}
802
803int hci_dev_reset_stat(__u16 dev)
804{
805 struct hci_dev *hdev;
806 int ret = 0;
807
808 hdev = hci_dev_get(dev);
809 if (!hdev)
810 return -ENODEV;
811
812 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
813
814 hci_dev_put(hdev);
815
816 return ret;
817}
818
819int hci_dev_cmd(unsigned int cmd, void __user *arg)
820{
821 struct hci_dev *hdev;
822 struct hci_dev_req dr;
823 int err = 0;
824
825 if (copy_from_user(&dr, arg, sizeof(dr)))
826 return -EFAULT;
827
828 hdev = hci_dev_get(dr.dev_id);
829 if (!hdev)
830 return -ENODEV;
831
832 switch (cmd) {
833 case HCISETAUTH:
834 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
835 msecs_to_jiffies(HCI_INIT_TIMEOUT));
836 break;
837
838 case HCISETENCRYPT:
839 if (!lmp_encrypt_capable(hdev)) {
840 err = -EOPNOTSUPP;
841 break;
842 }
843
844 if (!test_bit(HCI_AUTH, &hdev->flags)) {
845 /* Auth must be enabled first */
846 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
847 msecs_to_jiffies(HCI_INIT_TIMEOUT));
848 if (err)
849 break;
850 }
851
852 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
853 msecs_to_jiffies(HCI_INIT_TIMEOUT));
854 break;
855
856 case HCISETSCAN:
857 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
858 msecs_to_jiffies(HCI_INIT_TIMEOUT));
859 break;
860
861 case HCISETLINKPOL:
862 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
863 msecs_to_jiffies(HCI_INIT_TIMEOUT));
864 break;
865
866 case HCISETLINKMODE:
867 hdev->link_mode = ((__u16) dr.dev_opt) &
868 (HCI_LM_MASTER | HCI_LM_ACCEPT);
869 break;
870
871 case HCISETPTYPE:
872 hdev->pkt_type = (__u16) dr.dev_opt;
873 break;
874
875 case HCISETACLMTU:
876 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
877 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
878 break;
879
880 case HCISETSCOMTU:
881 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
882 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
883 break;
884
885 default:
886 err = -EINVAL;
887 break;
888 }
889
890 hci_dev_put(hdev);
891 return err;
892}
893
894int hci_get_dev_list(void __user *arg)
895{
896 struct hci_dev *hdev;
897 struct hci_dev_list_req *dl;
898 struct hci_dev_req *dr;
899 int n = 0, size, err;
900 __u16 dev_num;
901
902 if (get_user(dev_num, (__u16 __user *) arg))
903 return -EFAULT;
904
905 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
906 return -EINVAL;
907
908 size = sizeof(*dl) + dev_num * sizeof(*dr);
909
910 dl = kzalloc(size, GFP_KERNEL);
911 if (!dl)
912 return -ENOMEM;
913
914 dr = dl->dev_req;
915
916 read_lock(&hci_dev_list_lock);
917 list_for_each_entry(hdev, &hci_dev_list, list) {
918 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
919 cancel_delayed_work(&hdev->power_off);
920
921 if (!test_bit(HCI_MGMT, &hdev->flags))
922 set_bit(HCI_PAIRABLE, &hdev->flags);
923
924 (dr + n)->dev_id = hdev->id;
925 (dr + n)->dev_opt = hdev->flags;
926
927 if (++n >= dev_num)
928 break;
929 }
930 read_unlock(&hci_dev_list_lock);
931
932 dl->dev_num = n;
933 size = sizeof(*dl) + n * sizeof(*dr);
934
935 err = copy_to_user(arg, dl, size);
936 kfree(dl);
937
938 return err ? -EFAULT : 0;
939}
940
941int hci_get_dev_info(void __user *arg)
942{
943 struct hci_dev *hdev;
944 struct hci_dev_info di;
945 int err = 0;
946
947 if (copy_from_user(&di, arg, sizeof(di)))
948 return -EFAULT;
949
950 hdev = hci_dev_get(di.dev_id);
951 if (!hdev)
952 return -ENODEV;
953
954 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
955 cancel_delayed_work_sync(&hdev->power_off);
956
957 if (!test_bit(HCI_MGMT, &hdev->flags))
958 set_bit(HCI_PAIRABLE, &hdev->flags);
959
960 strcpy(di.name, hdev->name);
961 di.bdaddr = hdev->bdaddr;
962 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
963 di.flags = hdev->flags;
964 di.pkt_type = hdev->pkt_type;
965 di.acl_mtu = hdev->acl_mtu;
966 di.acl_pkts = hdev->acl_pkts;
967 di.sco_mtu = hdev->sco_mtu;
968 di.sco_pkts = hdev->sco_pkts;
969 di.link_policy = hdev->link_policy;
970 di.link_mode = hdev->link_mode;
971
972 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
973 memcpy(&di.features, &hdev->features, sizeof(di.features));
974
975 if (copy_to_user(arg, &di, sizeof(di)))
976 err = -EFAULT;
977
978 hci_dev_put(hdev);
979
980 return err;
981}
982
983/* ---- Interface to HCI drivers ---- */
984
985static int hci_rfkill_set_block(void *data, bool blocked)
986{
987 struct hci_dev *hdev = data;
988
989 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
990
991 if (!blocked)
992 return 0;
993
994 hci_dev_do_close(hdev);
995
996 return 0;
997}
998
999static const struct rfkill_ops hci_rfkill_ops = {
1000 .set_block = hci_rfkill_set_block,
1001};
1002
1003/* Alloc HCI device */
1004struct hci_dev *hci_alloc_dev(void)
1005{
1006 struct hci_dev *hdev;
1007
1008 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1009 if (!hdev)
1010 return NULL;
1011
1012 hci_init_sysfs(hdev);
1013 skb_queue_head_init(&hdev->driver_init);
1014
1015 return hdev;
1016}
1017EXPORT_SYMBOL(hci_alloc_dev);
1018
1019/* Free HCI device */
1020void hci_free_dev(struct hci_dev *hdev)
1021{
1022 skb_queue_purge(&hdev->driver_init);
1023
1024 /* will free via device release */
1025 put_device(&hdev->dev);
1026}
1027EXPORT_SYMBOL(hci_free_dev);
1028
1029static void hci_power_on(struct work_struct *work)
1030{
1031 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1032
1033 BT_DBG("%s", hdev->name);
1034
1035 if (hci_dev_open(hdev->id) < 0)
1036 return;
1037
1038 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
1039 schedule_delayed_work(&hdev->power_off,
1040 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1041
1042 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
1043 mgmt_index_added(hdev);
1044}
1045
1046static void hci_power_off(struct work_struct *work)
1047{
1048 struct hci_dev *hdev = container_of(work, struct hci_dev,
1049 power_off.work);
1050
1051 BT_DBG("%s", hdev->name);
1052
1053 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1054
1055 hci_dev_close(hdev->id);
1056}
1057
1058static void hci_discov_off(struct work_struct *work)
1059{
1060 struct hci_dev *hdev;
1061 u8 scan = SCAN_PAGE;
1062
1063 hdev = container_of(work, struct hci_dev, discov_off.work);
1064
1065 BT_DBG("%s", hdev->name);
1066
1067 hci_dev_lock(hdev);
1068
1069 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1070
1071 hdev->discov_timeout = 0;
1072
1073 hci_dev_unlock(hdev);
1074}
1075
1076int hci_uuids_clear(struct hci_dev *hdev)
1077{
1078 struct list_head *p, *n;
1079
1080 list_for_each_safe(p, n, &hdev->uuids) {
1081 struct bt_uuid *uuid;
1082
1083 uuid = list_entry(p, struct bt_uuid, list);
1084
1085 list_del(p);
1086 kfree(uuid);
1087 }
1088
1089 return 0;
1090}
1091
1092int hci_link_keys_clear(struct hci_dev *hdev)
1093{
1094 struct list_head *p, *n;
1095
1096 list_for_each_safe(p, n, &hdev->link_keys) {
1097 struct link_key *key;
1098
1099 key = list_entry(p, struct link_key, list);
1100
1101 list_del(p);
1102 kfree(key);
1103 }
1104
1105 return 0;
1106}
1107
1108struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1109{
1110 struct link_key *k;
1111
1112 list_for_each_entry(k, &hdev->link_keys, list)
1113 if (bacmp(bdaddr, &k->bdaddr) == 0)
1114 return k;
1115
1116 return NULL;
1117}
1118
1119static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1120 u8 key_type, u8 old_key_type)
1121{
1122 /* Legacy key */
1123 if (key_type < 0x03)
1124 return 1;
1125
1126 /* Debug keys are insecure so don't store them persistently */
1127 if (key_type == HCI_LK_DEBUG_COMBINATION)
1128 return 0;
1129
1130 /* Changed combination key and there's no previous one */
1131 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1132 return 0;
1133
1134 /* Security mode 3 case */
1135 if (!conn)
1136 return 1;
1137
1138 /* Neither the local nor the remote side requested no-bonding */
1139 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140 return 1;
1141
1142 /* Local side had dedicated bonding as requirement */
1143 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144 return 1;
1145
1146 /* Remote side had dedicated bonding as requirement */
1147 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148 return 1;
1149
1150 /* If none of the above criteria match, then don't store the key
1151 * persistently */
1152 return 0;
1153}
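
/*
 * Worked example (sketch; auth_type values as defined by the HCI spec):
 * if both sides paired with General Bonding (0x04), the ">0x01" test
 * above passes and the key is stored; if both sides asked for No Bonding
 * (0x00/0x01), every test falls through and the key is dropped once the
 * connection ends.
 */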
1154
1155struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1156{
1157 struct link_key *k;
1158
1159 list_for_each_entry(k, &hdev->link_keys, list) {
1160 struct key_master_id *id;
1161
1162 if (k->type != HCI_LK_SMP_LTK)
1163 continue;
1164
1165 if (k->dlen != sizeof(*id))
1166 continue;
1167
1168 id = (void *) &k->data;
1169 if (id->ediv == ediv &&
1170 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1171 return k;
1172 }
1173
1174 return NULL;
1175}
1176EXPORT_SYMBOL(hci_find_ltk);
1177
1178struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1179 bdaddr_t *bdaddr, u8 type)
1180{
1181 struct link_key *k;
1182
1183 list_for_each_entry(k, &hdev->link_keys, list)
1184 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1185 return k;
1186
1187 return NULL;
1188}
1189EXPORT_SYMBOL(hci_find_link_key_type);
1190
1191int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1192 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1193{
1194 struct link_key *key, *old_key;
1195 u8 old_key_type, persistent;
1196
1197 old_key = hci_find_link_key(hdev, bdaddr);
1198 if (old_key) {
1199 old_key_type = old_key->type;
1200 key = old_key;
1201 } else {
1202 old_key_type = conn ? conn->key_type : 0xff;
1203 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1204 if (!key)
1205 return -ENOMEM;
1206 list_add(&key->list, &hdev->link_keys);
1207 }
1208
1209 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1210
1211 /* Some buggy controller combinations generate a changed
1212 * combination key for legacy pairing even when there's no
1213 * previous key */
1214 if (type == HCI_LK_CHANGED_COMBINATION &&
1215 (!conn || conn->remote_auth == 0xff) &&
1216 old_key_type == 0xff) {
1217 type = HCI_LK_COMBINATION;
1218 if (conn)
1219 conn->key_type = type;
1220 }
1221
1222 bacpy(&key->bdaddr, bdaddr);
1223 memcpy(key->val, val, 16);
1224 key->pin_len = pin_len;
1225
1226 if (type == HCI_LK_CHANGED_COMBINATION)
1227 key->type = old_key_type;
1228 else
1229 key->type = type;
1230
1231 if (!new_key)
1232 return 0;
1233
1234 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1235
1236 mgmt_new_link_key(hdev, key, persistent);
1237
1238 if (!persistent) {
1239 list_del(&key->list);
1240 kfree(key);
1241 }
1242
1243 return 0;
1244}
1245
1246int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1247 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1248{
1249 struct link_key *key, *old_key;
1250 struct key_master_id *id;
1251 u8 old_key_type;
1252
1253 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1254
1255 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1256 if (old_key) {
1257 key = old_key;
1258 old_key_type = old_key->type;
1259 } else {
1260 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1261 if (!key)
1262 return -ENOMEM;
1263 list_add(&key->list, &hdev->link_keys);
1264 old_key_type = 0xff;
1265 }
1266
1267 key->dlen = sizeof(*id);
1268
1269 bacpy(&key->bdaddr, bdaddr);
1270 memcpy(key->val, ltk, sizeof(key->val));
1271 key->type = HCI_LK_SMP_LTK;
1272 key->pin_len = key_size;
1273
1274 id = (void *) &key->data;
1275 id->ediv = ediv;
1276 memcpy(id->rand, rand, sizeof(id->rand));
1277
1278 if (new_key)
1279 mgmt_new_link_key(hdev, key, old_key_type);
1280
1281 return 0;
1282}
1283
1284int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1285{
1286 struct link_key *key;
1287
1288 key = hci_find_link_key(hdev, bdaddr);
1289 if (!key)
1290 return -ENOENT;
1291
1292 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1293
1294 list_del(&key->list);
1295 kfree(key);
1296
1297 return 0;
1298}
1299
1300/* HCI command timer function */
1301static void hci_cmd_timer(unsigned long arg)
1302{
1303 struct hci_dev *hdev = (void *) arg;
1304
1305 BT_ERR("%s command tx timeout", hdev->name);
1306 atomic_set(&hdev->cmd_cnt, 1);
1307 queue_work(hdev->workqueue, &hdev->cmd_work);
1308}
1309
1310struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1311 bdaddr_t *bdaddr)
1312{
1313 struct oob_data *data;
1314
1315 list_for_each_entry(data, &hdev->remote_oob_data, list)
1316 if (bacmp(bdaddr, &data->bdaddr) == 0)
1317 return data;
1318
1319 return NULL;
1320}
1321
1322int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1323{
1324 struct oob_data *data;
1325
1326 data = hci_find_remote_oob_data(hdev, bdaddr);
1327 if (!data)
1328 return -ENOENT;
1329
1330 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1331
1332 list_del(&data->list);
1333 kfree(data);
1334
1335 return 0;
1336}
1337
1338int hci_remote_oob_data_clear(struct hci_dev *hdev)
1339{
1340 struct oob_data *data, *n;
1341
1342 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1343 list_del(&data->list);
1344 kfree(data);
1345 }
1346
1347 return 0;
1348}
1349
1350int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1351 u8 *randomizer)
1352{
1353 struct oob_data *data;
1354
1355 data = hci_find_remote_oob_data(hdev, bdaddr);
1356
1357 if (!data) {
1358 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1359 if (!data)
1360 return -ENOMEM;
1361
1362 bacpy(&data->bdaddr, bdaddr);
1363 list_add(&data->list, &hdev->remote_oob_data);
1364 }
1365
1366 memcpy(data->hash, hash, sizeof(data->hash));
1367 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1368
1369 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1370
1371 return 0;
1372}
1373
1374struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1375 bdaddr_t *bdaddr)
1376{
1377 struct bdaddr_list *b;
1378
1379 list_for_each_entry(b, &hdev->blacklist, list)
1380 if (bacmp(bdaddr, &b->bdaddr) == 0)
1381 return b;
1382
1383 return NULL;
1384}
1385
1386int hci_blacklist_clear(struct hci_dev *hdev)
1387{
1388 struct list_head *p, *n;
1389
1390 list_for_each_safe(p, n, &hdev->blacklist) {
1391 struct bdaddr_list *b;
1392
1393 b = list_entry(p, struct bdaddr_list, list);
1394
1395 list_del(p);
1396 kfree(b);
1397 }
1398
1399 return 0;
1400}
1401
1402int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1403{
1404 struct bdaddr_list *entry;
1405
1406 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1407 return -EBADF;
1408
1409 if (hci_blacklist_lookup(hdev, bdaddr))
1410 return -EEXIST;
1411
1412 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1413 if (!entry)
1414 return -ENOMEM;
1415
1416 bacpy(&entry->bdaddr, bdaddr);
1417
1418 list_add(&entry->list, &hdev->blacklist);
1419
1420 return mgmt_device_blocked(hdev, bdaddr);
1421}
1422
1423int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1424{
1425 struct bdaddr_list *entry;
1426
1427 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1428 return hci_blacklist_clear(hdev);
1429
1430 entry = hci_blacklist_lookup(hdev, bdaddr);
1431 if (!entry)
1432 return -ENOENT;
1433
1434 list_del(&entry->list);
1435 kfree(entry);
1436
1437 return mgmt_device_unblocked(hdev, bdaddr);
1438}
1439
1440static void hci_clear_adv_cache(struct work_struct *work)
1441{
1442 struct hci_dev *hdev = container_of(work, struct hci_dev,
1443 adv_work.work);
1444
1445 hci_dev_lock(hdev);
1446
1447 hci_adv_entries_clear(hdev);
1448
1449 hci_dev_unlock(hdev);
1450}
1451
1452int hci_adv_entries_clear(struct hci_dev *hdev)
1453{
1454 struct adv_entry *entry, *tmp;
1455
1456 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1457 list_del(&entry->list);
1458 kfree(entry);
1459 }
1460
1461 BT_DBG("%s adv cache cleared", hdev->name);
1462
1463 return 0;
1464}
1465
1466struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1467{
1468 struct adv_entry *entry;
1469
1470 list_for_each_entry(entry, &hdev->adv_entries, list)
1471 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1472 return entry;
1473
1474 return NULL;
1475}
1476
1477static inline int is_connectable_adv(u8 evt_type)
1478{
1479 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1480 return 1;
1481
1482 return 0;
1483}
1484
1485int hci_add_adv_entry(struct hci_dev *hdev,
1486 struct hci_ev_le_advertising_info *ev)
1487{
1488 struct adv_entry *entry;
1489
1490 if (!is_connectable_adv(ev->evt_type))
1491 return -EINVAL;
1492
1493 /* Only new entries should be added to adv_entries. So, if
1494 * bdaddr was found, don't add it. */
1495 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1496 return 0;
1497
1498 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1499 if (!entry)
1500 return -ENOMEM;
1501
1502 bacpy(&entry->bdaddr, &ev->bdaddr);
1503 entry->bdaddr_type = ev->bdaddr_type;
1504
1505 list_add(&entry->list, &hdev->adv_entries);
1506
1507 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1508 batostr(&entry->bdaddr), entry->bdaddr_type);
1509
1510 return 0;
1511}
1512
1513/* Register HCI device */
1514int hci_register_dev(struct hci_dev *hdev)
1515{
1516 struct list_head *head = &hci_dev_list, *p;
1517 int i, id, error;
1518
1519 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1520 hdev->bus, hdev->owner);
1521
1522 if (!hdev->open || !hdev->close || !hdev->destruct)
1523 return -EINVAL;
1524
1525 /* Do not allow HCI_AMP devices to register at index 0,
1526 * so the index can be used as the AMP controller ID.
1527 */
1528 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1529
1530 write_lock(&hci_dev_list_lock);
1531
1532 /* Find first available device id */
1533 list_for_each(p, &hci_dev_list) {
1534 if (list_entry(p, struct hci_dev, list)->id != id)
1535 break;
1536 head = p; id++;
1537 }
1538
1539 sprintf(hdev->name, "hci%d", id);
1540 hdev->id = id;
1541 list_add_tail(&hdev->list, head);
1542
1543 atomic_set(&hdev->refcnt, 1);
1544 mutex_init(&hdev->lock);
1545
1546 hdev->flags = 0;
1547 hdev->dev_flags = 0;
1548 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1549 hdev->esco_type = (ESCO_HV1);
1550 hdev->link_mode = (HCI_LM_ACCEPT);
1551 hdev->io_capability = 0x03; /* No Input No Output */
1552
1553 hdev->idle_timeout = 0;
1554 hdev->sniff_max_interval = 800;
1555 hdev->sniff_min_interval = 80;
1556
1557 INIT_WORK(&hdev->rx_work, hci_rx_work);
1558 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1559 INIT_WORK(&hdev->tx_work, hci_tx_work);
1560
1562 skb_queue_head_init(&hdev->rx_q);
1563 skb_queue_head_init(&hdev->cmd_q);
1564 skb_queue_head_init(&hdev->raw_q);
1565
1566 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1567
1568 for (i = 0; i < NUM_REASSEMBLY; i++)
1569 hdev->reassembly[i] = NULL;
1570
1571 init_waitqueue_head(&hdev->req_wait_q);
1572 mutex_init(&hdev->req_lock);
1573
1574 discovery_init(hdev);
1575
1576 hci_conn_hash_init(hdev);
1577
1578 INIT_LIST_HEAD(&hdev->mgmt_pending);
1579
1580 INIT_LIST_HEAD(&hdev->blacklist);
1581
1582 INIT_LIST_HEAD(&hdev->uuids);
1583
1584 INIT_LIST_HEAD(&hdev->link_keys);
1585
1586 INIT_LIST_HEAD(&hdev->remote_oob_data);
1587
1588 INIT_LIST_HEAD(&hdev->adv_entries);
1589
1590 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1591 INIT_WORK(&hdev->power_on, hci_power_on);
1592 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1593
1594 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1595
1596 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1597
1598 atomic_set(&hdev->promisc, 0);
1599
1600 write_unlock(&hci_dev_list_lock);
1601
1602 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1603 WQ_MEM_RECLAIM, 1);
1604 if (!hdev->workqueue) {
1605 error = -ENOMEM;
1606 goto err;
1607 }
1608
1609 error = hci_add_sysfs(hdev);
1610 if (error < 0)
1611 goto err_wqueue;
1612
1613 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1614 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1615 if (hdev->rfkill) {
1616 if (rfkill_register(hdev->rfkill) < 0) {
1617 rfkill_destroy(hdev->rfkill);
1618 hdev->rfkill = NULL;
1619 }
1620 }
1621
1622 set_bit(HCI_AUTO_OFF, &hdev->flags);
1623 set_bit(HCI_SETUP, &hdev->flags);
1624 schedule_work(&hdev->power_on);
1625
1626 hci_notify(hdev, HCI_DEV_REG);
1627
1628 return id;
1629
1630err_wqueue:
1631 destroy_workqueue(hdev->workqueue);
1632err:
1633 write_lock(&hci_dev_list_lock);
1634 list_del(&hdev->list);
1635 write_unlock(&hci_dev_list_lock);
1636
1637 return error;
1638}
1639EXPORT_SYMBOL(hci_register_dev);
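
/*
 * Minimal driver sketch (hypothetical my_* callbacks): fill in the three
 * mandatory hooks checked above plus ->send, then register; on failure
 * the caller still owns the device and must free it.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus      = HCI_UART;	// whichever transport applies
 *	hdev->open     = my_open;	// int (*)(struct hci_dev *)
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;	// int (*)(struct sk_buff *)
 *	hdev->destruct = my_destruct;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */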
1640
1641/* Unregister HCI device */
1642void hci_unregister_dev(struct hci_dev *hdev)
1643{
1644 int i;
1645
1646 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1647
1648 write_lock(&hci_dev_list_lock);
1649 list_del(&hdev->list);
1650 write_unlock(&hci_dev_list_lock);
1651
1652 hci_dev_do_close(hdev);
1653
1654 for (i = 0; i < NUM_REASSEMBLY; i++)
1655 kfree_skb(hdev->reassembly[i]);
1656
1657 if (!test_bit(HCI_INIT, &hdev->flags) &&
1658 !test_bit(HCI_SETUP, &hdev->flags)) {
1659 hci_dev_lock(hdev);
1660 mgmt_index_removed(hdev);
1661 hci_dev_unlock(hdev);
1662 }
1663
1664 /* mgmt_index_removed should take care of emptying the
1665 * pending list */
1666 BUG_ON(!list_empty(&hdev->mgmt_pending));
1667
1668 hci_notify(hdev, HCI_DEV_UNREG);
1669
1670 if (hdev->rfkill) {
1671 rfkill_unregister(hdev->rfkill);
1672 rfkill_destroy(hdev->rfkill);
1673 }
1674
1675 hci_del_sysfs(hdev);
1676
1677 cancel_delayed_work_sync(&hdev->adv_work);
1678
1679 destroy_workqueue(hdev->workqueue);
1680
1681 hci_dev_lock(hdev);
1682 hci_blacklist_clear(hdev);
1683 hci_uuids_clear(hdev);
1684 hci_link_keys_clear(hdev);
1685 hci_remote_oob_data_clear(hdev);
1686 hci_adv_entries_clear(hdev);
1687 hci_dev_unlock(hdev);
1688
1689 __hci_dev_put(hdev);
1690}
1691EXPORT_SYMBOL(hci_unregister_dev);
1692
1693/* Suspend HCI device */
1694int hci_suspend_dev(struct hci_dev *hdev)
1695{
1696 hci_notify(hdev, HCI_DEV_SUSPEND);
1697 return 0;
1698}
1699EXPORT_SYMBOL(hci_suspend_dev);
1700
1701/* Resume HCI device */
1702int hci_resume_dev(struct hci_dev *hdev)
1703{
1704 hci_notify(hdev, HCI_DEV_RESUME);
1705 return 0;
1706}
1707EXPORT_SYMBOL(hci_resume_dev);
1708
1709/* Receive frame from HCI drivers */
1710int hci_recv_frame(struct sk_buff *skb)
1711{
1712 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1713 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1714 && !test_bit(HCI_INIT, &hdev->flags))) {
1715 kfree_skb(skb);
1716 return -ENXIO;
1717 }
1718
1719 /* Incoming skb */
1720 bt_cb(skb)->incoming = 1;
1721
1722 /* Time stamp */
1723 __net_timestamp(skb);
1724
1725 skb_queue_tail(&hdev->rx_q, skb);
1726 queue_work(hdev->workqueue, &hdev->rx_work);
1727
1728 return 0;
1729}
1730EXPORT_SYMBOL(hci_recv_frame);
1731
1732static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1733 int count, __u8 index)
1734{
1735 int len = 0;
1736 int hlen = 0;
1737 int remain = count;
1738 struct sk_buff *skb;
1739 struct bt_skb_cb *scb;
1740
1741 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1742 index >= NUM_REASSEMBLY)
1743 return -EILSEQ;
1744
1745 skb = hdev->reassembly[index];
1746
1747 if (!skb) {
1748 switch (type) {
1749 case HCI_ACLDATA_PKT:
1750 len = HCI_MAX_FRAME_SIZE;
1751 hlen = HCI_ACL_HDR_SIZE;
1752 break;
1753 case HCI_EVENT_PKT:
1754 len = HCI_MAX_EVENT_SIZE;
1755 hlen = HCI_EVENT_HDR_SIZE;
1756 break;
1757 case HCI_SCODATA_PKT:
1758 len = HCI_MAX_SCO_SIZE;
1759 hlen = HCI_SCO_HDR_SIZE;
1760 break;
1761 }
1762
1763 skb = bt_skb_alloc(len, GFP_ATOMIC);
1764 if (!skb)
1765 return -ENOMEM;
1766
1767 scb = (void *) skb->cb;
1768 scb->expect = hlen;
1769 scb->pkt_type = type;
1770
1771 skb->dev = (void *) hdev;
1772 hdev->reassembly[index] = skb;
1773 }
1774
1775 while (count) {
1776 scb = (void *) skb->cb;
1777 len = min(scb->expect, (__u16)count);
1778
1779 memcpy(skb_put(skb, len), data, len);
1780
1781 count -= len;
1782 data += len;
1783 scb->expect -= len;
1784 remain = count;
1785
1786 switch (type) {
1787 case HCI_EVENT_PKT:
1788 if (skb->len == HCI_EVENT_HDR_SIZE) {
1789 struct hci_event_hdr *h = hci_event_hdr(skb);
1790 scb->expect = h->plen;
1791
1792 if (skb_tailroom(skb) < scb->expect) {
1793 kfree_skb(skb);
1794 hdev->reassembly[index] = NULL;
1795 return -ENOMEM;
1796 }
1797 }
1798 break;
1799
1800 case HCI_ACLDATA_PKT:
1801 if (skb->len == HCI_ACL_HDR_SIZE) {
1802 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1803 scb->expect = __le16_to_cpu(h->dlen);
1804
1805 if (skb_tailroom(skb) < scb->expect) {
1806 kfree_skb(skb);
1807 hdev->reassembly[index] = NULL;
1808 return -ENOMEM;
1809 }
1810 }
1811 break;
1812
1813 case HCI_SCODATA_PKT:
1814 if (skb->len == HCI_SCO_HDR_SIZE) {
1815 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1816 scb->expect = h->dlen;
1817
1818 if (skb_tailroom(skb) < scb->expect) {
1819 kfree_skb(skb);
1820 hdev->reassembly[index] = NULL;
1821 return -ENOMEM;
1822 }
1823 }
1824 break;
1825 }
1826
1827 if (scb->expect == 0) {
1828 /* Complete frame */
1829
1830 bt_cb(skb)->pkt_type = type;
1831 hci_recv_frame(skb);
1832
1833 hdev->reassembly[index] = NULL;
1834 return remain;
1835 }
1836 }
1837
1838 return remain;
1839}
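
/*
 * Return value sketch: the number of input bytes left unconsumed. A
 * completed frame stops the loop early, so feeding 50 bytes when only 10
 * finish the current event returns 40, and the caller loops (see
 * hci_recv_fragment() below) to start on the next frame.
 */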
1840
1841int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1842{
1843 int rem = 0;
1844
1845 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1846 return -EILSEQ;
1847
1848 while (count) {
1849 rem = hci_reassembly(hdev, type, data, count, type - 1);
1850 if (rem < 0)
1851 return rem;
1852
1853 data += (count - rem);
1854 count = rem;
1855 }
1856
1857 return rem;
1858}
1859EXPORT_SYMBOL(hci_recv_fragment);
1860
1861#define STREAM_REASSEMBLY 0
1862
1863int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1864{
1865 int type;
1866 int rem = 0;
1867
1868 while (count) {
1869 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1870
1871 if (!skb) {
1872 struct { char type; } *pkt;
1873
1874 /* Start of the frame */
1875 pkt = data;
1876 type = pkt->type;
1877
1878 data++;
1879 count--;
1880 } else
1881 type = bt_cb(skb)->pkt_type;
1882
1883 rem = hci_reassembly(hdev, type, data, count,
1884 STREAM_REASSEMBLY);
1885 if (rem < 0)
1886 return rem;
1887
1888 data += (count - rem);
1889 count = rem;
1890 }
1891
1892 return rem;
1893}
1894EXPORT_SYMBOL(hci_recv_stream_fragment);
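
/*
 * Driver-side sketch (hypothetical UART RX path): byte streams carry an
 * H4-style packet indicator as their first byte, which this helper peels
 * off before reassembling.
 *
 *	static void my_uart_rx(struct hci_dev *hdev, u8 *buf, int count)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, count) < 0)
 *			BT_ERR("%s corrupted stream", hdev->name);
 *	}
 */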
1895
1896/* ---- Interface to upper protocols ---- */
1897
1898int hci_register_cb(struct hci_cb *cb)
1899{
1900 BT_DBG("%p name %s", cb, cb->name);
1901
1902 write_lock(&hci_cb_list_lock);
1903 list_add(&cb->list, &hci_cb_list);
1904 write_unlock(&hci_cb_list_lock);
1905
1906 return 0;
1907}
1908EXPORT_SYMBOL(hci_register_cb);
1909
1910int hci_unregister_cb(struct hci_cb *cb)
1911{
1912 BT_DBG("%p name %s", cb, cb->name);
1913
1914 write_lock(&hci_cb_list_lock);
1915 list_del(&cb->list);
1916 write_unlock(&hci_cb_list_lock);
1917
1918 return 0;
1919}
1920EXPORT_SYMBOL(hci_unregister_cb);
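
/*
 * Registration sketch (hypothetical my_* names): an upper protocol hooks
 * connection events through a struct hci_cb; fields beyond .name are the
 * *_cfm callbacks invoked by the event handlers.
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);	// module init
 *	hci_unregister_cb(&my_cb);	// module exit
 */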
1921
1922static int hci_send_frame(struct sk_buff *skb)
1923{
1924 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1925
1926 if (!hdev) {
1927 kfree_skb(skb);
1928 return -ENODEV;
1929 }
1930
1931 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1932
1933 if (atomic_read(&hdev->promisc)) {
1934 /* Time stamp */
1935 __net_timestamp(skb);
1936
1937 hci_send_to_sock(hdev, skb, NULL);
1938 }
1939
1940 /* Get rid of the skb owner prior to sending to the driver. */
1941 skb_orphan(skb);
1942
1943 return hdev->send(skb);
1944}
1945
1946/* Send HCI command */
1947int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1948{
1949 int len = HCI_COMMAND_HDR_SIZE + plen;
1950 struct hci_command_hdr *hdr;
1951 struct sk_buff *skb;
1952
1953 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1954
1955 skb = bt_skb_alloc(len, GFP_ATOMIC);
1956 if (!skb) {
1957 BT_ERR("%s no memory for command", hdev->name);
1958 return -ENOMEM;
1959 }
1960
1961 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1962 hdr->opcode = cpu_to_le16(opcode);
1963 hdr->plen = plen;
1964
1965 if (plen)
1966 memcpy(skb_put(skb, plen), param, plen);
1967
1968 BT_DBG("skb len %d", skb->len);
1969
1970 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1971 skb->dev = (void *) hdev;
1972
1973 if (test_bit(HCI_INIT, &hdev->flags))
1974 hdev->init_last_cmd = opcode;
1975
1976 skb_queue_tail(&hdev->cmd_q, skb);
1977 queue_work(hdev->workqueue, &hdev->cmd_work);
1978
1979 return 0;
1980}
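
/*
 * Usage sketch: the parameter block is copied into the skb, so a stack
 * variable is fine. Note the command is only queued here; hci_cmd_work()
 * transmits it once the controller's command window (cmd_cnt) allows.
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */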
1981
1982/* Get data from the previously sent command */
1983void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1984{
1985 struct hci_command_hdr *hdr;
1986
1987 if (!hdev->sent_cmd)
1988 return NULL;
1989
1990 hdr = (void *) hdev->sent_cmd->data;
1991
1992 if (hdr->opcode != cpu_to_le16(opcode))
1993 return NULL;
1994
1995 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1996
1997 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1998}
1999
2000/* Send ACL data */
2001static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2002{
2003 struct hci_acl_hdr *hdr;
2004 int len = skb->len;
2005
2006 skb_push(skb, HCI_ACL_HDR_SIZE);
2007 skb_reset_transport_header(skb);
2008 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2009 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2010 hdr->dlen = cpu_to_le16(len);
2011}
2012
2013static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2014 struct sk_buff *skb, __u16 flags)
2015{
2016 struct hci_dev *hdev = conn->hdev;
2017 struct sk_buff *list;
2018
2019 list = skb_shinfo(skb)->frag_list;
2020 if (!list) {
2021 /* Non fragmented */
2022 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2023
2024 skb_queue_tail(queue, skb);
2025 } else {
2026 /* Fragmented */
2027 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2028
2029 skb_shinfo(skb)->frag_list = NULL;
2030
2031 /* Queue all fragments atomically */
2032 spin_lock(&queue->lock);
2033
2034 __skb_queue_tail(queue, skb);
2035
2036 flags &= ~ACL_START;
2037 flags |= ACL_CONT;
2038 do {
2039 skb = list; list = list->next;
2040
2041 skb->dev = (void *) hdev;
2042 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2043 hci_add_acl_hdr(skb, conn->handle, flags);
2044
2045 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2046
2047 __skb_queue_tail(queue, skb);
2048 } while (list);
2049
2050 spin_unlock(&queue->lock);
2051 }
2052}
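
/*
 * Example (sketch): an skb carrying two entries on its frag_list becomes
 * three ACL packets on the queue. The head keeps the caller's ACL_START
 * flags, the two fragments are re-tagged ACL_CONT by the loop above, and
 * the queue spinlock keeps another writer from interleaving its own
 * fragments in between.
 */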
2053
2054void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2055{
2056 struct hci_conn *conn = chan->conn;
2057 struct hci_dev *hdev = conn->hdev;
2058
2059 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2060
2061 skb->dev = (void *) hdev;
2062 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2063 hci_add_acl_hdr(skb, conn->handle, flags);
2064
2065 hci_queue_acl(conn, &chan->data_q, skb, flags);
2066
2067 queue_work(hdev->workqueue, &hdev->tx_work);
2068}
2069EXPORT_SYMBOL(hci_send_acl);
2070
2071/* Send SCO data */
2072void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2073{
2074 struct hci_dev *hdev = conn->hdev;
2075 struct hci_sco_hdr hdr;
2076
2077 BT_DBG("%s len %d", hdev->name, skb->len);
2078
2079 hdr.handle = cpu_to_le16(conn->handle);
2080 hdr.dlen = skb->len;
2081
2082 skb_push(skb, HCI_SCO_HDR_SIZE);
2083 skb_reset_transport_header(skb);
2084 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2085
2086 skb->dev = (void *) hdev;
2087 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2088
2089 skb_queue_tail(&conn->data_q, skb);
2090 queue_work(hdev->workqueue, &hdev->tx_work);
2091}
2092EXPORT_SYMBOL(hci_send_sco);
2093
2094/* ---- HCI TX task (outgoing data) ---- */
2095
2096/* HCI Connection scheduler */
2097static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2098{
2099 struct hci_conn_hash *h = &hdev->conn_hash;
2100 struct hci_conn *conn = NULL, *c;
2101 int num = 0, min = ~0;
2102
2103 /* We don't have to lock device here. Connections are always
2104 * added and removed with TX task disabled. */
2105
2106 rcu_read_lock();
2107
2108 list_for_each_entry_rcu(c, &h->list, list) {
2109 if (c->type != type || skb_queue_empty(&c->data_q))
2110 continue;
2111
2112 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2113 continue;
2114
2115 num++;
2116
2117 if (c->sent < min) {
2118 min = c->sent;
2119 conn = c;
2120 }
2121
2122 if (hci_conn_num(hdev, type) == num)
2123 break;
2124 }
2125
2126 rcu_read_unlock();
2127
2128 if (conn) {
2129 int cnt, q;
2130
2131 switch (conn->type) {
2132 case ACL_LINK:
2133 cnt = hdev->acl_cnt;
2134 break;
2135 case SCO_LINK:
2136 case ESCO_LINK:
2137 cnt = hdev->sco_cnt;
2138 break;
2139 case LE_LINK:
2140 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2141 break;
2142 default:
2143 cnt = 0;
2144 BT_ERR("Unknown link type");
2145 }
2146
2147 q = cnt / num;
2148 *quote = q ? q : 1;
2149 } else
2150 *quote = 0;
2151
2152 BT_DBG("conn %p quote %d", conn, *quote);
2153 return conn;
2154}
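
/*
 * Quota arithmetic (sketch): with hdev->acl_cnt == 10 free buffers and
 * num == 4 ACL connections holding queued data, the least-used
 * connection gets quote = 10 / 4 = 2 packets this round; a zero quotient
 * is bumped to 1 so a busy link is never starved outright.
 */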
2155
2156static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2157{
2158 struct hci_conn_hash *h = &hdev->conn_hash;
2159 struct hci_conn *c;
2160
2161 BT_ERR("%s link tx timeout", hdev->name);
2162
2163 rcu_read_lock();
2164
2165 /* Kill stalled connections */
2166 list_for_each_entry_rcu(c, &h->list, list) {
2167 if (c->type == type && c->sent) {
2168 BT_ERR("%s killing stalled connection %s",
2169 hdev->name, batostr(&c->dst));
2170 hci_acl_disconn(c, 0x13);
2171 }
2172 }
2173
2174 rcu_read_unlock();
2175}
2176
2177static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2178 int *quote)
2179{
2180 struct hci_conn_hash *h = &hdev->conn_hash;
2181 struct hci_chan *chan = NULL;
2182 int num = 0, min = ~0, cur_prio = 0;
2183 struct hci_conn *conn;
2184 int cnt, q, conn_num = 0;
2185
2186 BT_DBG("%s", hdev->name);
2187
2188 rcu_read_lock();
2189
2190 list_for_each_entry_rcu(conn, &h->list, list) {
2191 struct hci_chan *tmp;
2192
2193 if (conn->type != type)
2194 continue;
2195
2196 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2197 continue;
2198
2199 conn_num++;
2200
2201 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2202 struct sk_buff *skb;
2203
2204 if (skb_queue_empty(&tmp->data_q))
2205 continue;
2206
2207 skb = skb_peek(&tmp->data_q);
2208 if (skb->priority < cur_prio)
2209 continue;
2210
2211 if (skb->priority > cur_prio) {
2212 num = 0;
2213 min = ~0;
2214 cur_prio = skb->priority;
2215 }
2216
2217 num++;
2218
2219 if (conn->sent < min) {
2220 min = conn->sent;
2221 chan = tmp;
2222 }
2223 }
2224
2225 if (hci_conn_num(hdev, type) == conn_num)
2226 break;
2227 }
2228
2229 rcu_read_unlock();
2230
2231 if (!chan)
2232 return NULL;
2233
2234 switch (chan->conn->type) {
2235 case ACL_LINK:
2236 cnt = hdev->acl_cnt;
2237 break;
2238 case SCO_LINK:
2239 case ESCO_LINK:
2240 cnt = hdev->sco_cnt;
2241 break;
2242 case LE_LINK:
2243 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2244 break;
2245 default:
2246 cnt = 0;
2247 BT_ERR("Unknown link type");
2248 }
2249
2250 q = cnt / num;
2251 *quote = q ? q : 1;
2252 BT_DBG("chan %p quote %d", chan, *quote);
2253 return chan;
2254}
2255
2256static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2257{
2258 struct hci_conn_hash *h = &hdev->conn_hash;
2259 struct hci_conn *conn;
2260 int num = 0;
2261
2262 BT_DBG("%s", hdev->name);
2263
2264 rcu_read_lock();
2265
2266 list_for_each_entry_rcu(conn, &h->list, list) {
2267 struct hci_chan *chan;
2268
2269 if (conn->type != type)
2270 continue;
2271
2272 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2273 continue;
2274
2275 num++;
2276
2277 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2278 struct sk_buff *skb;
2279
2280 if (chan->sent) {
2281 chan->sent = 0;
2282 continue;
2283 }
2284
2285 if (skb_queue_empty(&chan->data_q))
2286 continue;
2287
2288 skb = skb_peek(&chan->data_q);
2289 if (skb->priority >= HCI_PRIO_MAX - 1)
2290 continue;
2291
2292 skb->priority = HCI_PRIO_MAX - 1;
2293
2294 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2295 skb->priority);
2296 }
2297
2298 if (hci_conn_num(hdev, type) == num)
2299 break;
2300 }
2301
2302 rcu_read_unlock();
2304}
2305
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

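/* SCO and eSCO carry fixed-bandwidth synchronous (voice) traffic, so
 * there is no priority handling below: hci_low_sent() (defined earlier
 * in this file) simply picks the connection with the fewest frames in
 * flight, and conn->sent wraps back to 0 at ~0 rather than being
 * consumed as a buffer credit.
 */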
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

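/* LE scheduling mirrors the ACL path. Controllers that report no
 * dedicated LE buffers (hdev->le_pkts == 0) share the ACL buffer pool,
 * which is why cnt falls back to hdev->acl_cnt here and the remainder
 * is written back to the matching counter afterwards.
 */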
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

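/* hci_tx_work() runs on hdev->workqueue and drains the tx queues in a
 * fixed order: ACL first, then SCO/eSCO, then LE, and finally any raw
 * (unknown type) packets left in hdev->raw_q.
 */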
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

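/* The 16-bit handle field of an ACL header packs a 12-bit connection
 * handle together with the packet boundary and broadcast flags; the
 * hci_handle()/hci_flags() helpers split it as (h & 0x0fff) and
 * (h >> 12) respectively.
 */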
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

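/* hci_rx_work() runs on hdev->workqueue. Every received frame is first
 * mirrored to monitoring sockets while the device is in promiscuous
 * mode, dropped entirely in HCI_RAW mode, and data packets are also
 * dropped while HCI_INIT is set; everything else is dispatched by
 * bt_cb(skb)->pkt_type.
 */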
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

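/* Command flow control: hdev->cmd_cnt holds the number of HCI commands
 * the controller will currently accept (usually one). A clone of the
 * outgoing command is kept in hdev->sent_cmd so the matching Command
 * Complete/Status event can be paired with it, and cmd_timer acts as a
 * watchdog in case the controller never answers.
 */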
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

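/* The General Inquiry Access Code (GIAC) is the LAP 0x9e8b33; it is
 * stored below least-significant byte first, which is the byte order
 * the Inquiry command parameters expect.
 */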
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");