Bluetooth: Return updated name state with hci_inquiry_cache_update
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check whether the completed command
	 * matches the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			      void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
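
/*
 * Usage sketch (illustrative, not part of the original file): a request
 * callback only queues HCI commands, while hci_request() runs it under
 * the request lock and sleeps until hci_req_complete() fires or the
 * timeout expires. Enabling page and inquiry scan could look like:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	int err = hci_request(hdev, hci_scan_req, scan,
 *			      msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */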

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie)
		goto update;

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
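
/*
 * Caller sketch (illustrative): the bool result tells the inquiry result
 * event handlers whether the remote name is already resolved, i.e.
 * whether a remote name request still needs to be scheduled:
 *
 *	name_known = hci_inquiry_cache_update(hdev, &data, false);
 *
 * When it returns false the entry sits on cache->unknown awaiting name
 * resolution.
 */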

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
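
/*
 * User-space sketch (illustrative, error handling omitted): this path is
 * reached through the HCIINQUIRY ioctl on a raw HCI socket, with the
 * response buffer laid out directly after struct hci_inquiry_req:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = { 0 };
 *
 *	req.ir.dev_id = 0;
 *	req.ir.length = 8;			8 * 1.28s inquiry duration
 *	req.ir.num_rsp = 8;
 *	memcpy(req.ir.lap, "\x33\x8b\x9e", 3);	GIAC LAP 0x9e8b33
 *	ioctl(dd, HCIINQUIRY, &req);
 */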

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	 * enable_hs is not set.
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			      u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
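
/*
 * Worked example for the rules above (informative): legacy keys
 * (type < 0x03) are always stored and debug keys never are. For SSP
 * keys, auth requirements 0x00/0x01 mean no-bonding and 0x02/0x03 mean
 * dedicated bonding, so a key is kept when both sides requested some
 * form of bonding, or when either side explicitly asked for dedicated
 * bonding; otherwise (e.g. general bonding against a no-bonding peer)
 * the key is not stored persistently.
 */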

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
		    (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) &&
	    old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
		u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
		      struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
	       batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
	       hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
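
/*
 * Driver-side sketch (illustrative): a transport driver tags each skb
 * with its packet type and owning device before handing it to the core,
 * matching the pattern used in hci_init_req() above:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */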

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
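
/*
 * Stream sketch (illustrative): hci_recv_stream_fragment() consumes a
 * raw H:4 byte stream in which every frame is prefixed by a one-byte
 * packet type indicator. A UART transport would call it from its
 * receive path (the "foo" names below are hypothetical):
 *
 *	static void foo_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("%s frame reassembly failed", hdev->name);
 *	}
 */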

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
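
/*
 * Caller sketch (illustrative): parameters travel as a packed command
 * structure, exactly as hci_inq_req() does above:
 *
 *	struct hci_cp_inquiry cp;
 *
 *	memcpy(&cp.lap, &ir->lap, 3);
 *	cp.length = ir->length;
 *	cp.num_rsp = ir->num_rsp;
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 */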

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
					    int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
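
/*
 * Quota example (informative): with hdev->sco_cnt == 6 and three SCO
 * connections holding queued data, the least-recently-served connection
 * is elected and gets *quote = 6 / 3 = 2 packets before the next
 * election; the quotient is rounded up to at least 1 so that a busy
 * link can never be starved completely.
 */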
2130
2131 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2132 {
2133 struct hci_conn_hash *h = &hdev->conn_hash;
2134 struct hci_conn *c;
2135
2136 BT_ERR("%s link tx timeout", hdev->name);
2137
2138 rcu_read_lock();
2139
2140 /* Kill stalled connections */
2141 list_for_each_entry_rcu(c, &h->list, list) {
2142 if (c->type == type && c->sent) {
2143 BT_ERR("%s killing stalled connection %s",
2144 hdev->name, batostr(&c->dst));
2145 hci_acl_disconn(c, 0x13);
2146 }
2147 }
2148
2149 rcu_read_unlock();
2150 }
2151
2152 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2153 int *quote)
2154 {
2155 struct hci_conn_hash *h = &hdev->conn_hash;
2156 struct hci_chan *chan = NULL;
2157 int num = 0, min = ~0, cur_prio = 0;
2158 struct hci_conn *conn;
2159 int cnt, q, conn_num = 0;
2160
2161 BT_DBG("%s", hdev->name);
2162
2163 rcu_read_lock();
2164
2165 list_for_each_entry_rcu(conn, &h->list, list) {
2166 struct hci_chan *tmp;
2167
2168 if (conn->type != type)
2169 continue;
2170
2171 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2172 continue;
2173
2174 conn_num++;
2175
2176 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2177 struct sk_buff *skb;
2178
2179 if (skb_queue_empty(&tmp->data_q))
2180 continue;
2181
2182 skb = skb_peek(&tmp->data_q);
2183 if (skb->priority < cur_prio)
2184 continue;
2185
2186 if (skb->priority > cur_prio) {
2187 num = 0;
2188 min = ~0;
2189 cur_prio = skb->priority;
2190 }
2191
2192 num++;
2193
2194 if (conn->sent < min) {
2195 min = conn->sent;
2196 chan = tmp;
2197 }
2198 }
2199
2200 if (hci_conn_num(hdev, type) == conn_num)
2201 break;
2202 }
2203
2204 rcu_read_unlock();
2205
2206 if (!chan)
2207 return NULL;
2208
2209 switch (chan->conn->type) {
2210 case ACL_LINK:
2211 cnt = hdev->acl_cnt;
2212 break;
2213 case SCO_LINK:
2214 case ESCO_LINK:
2215 cnt = hdev->sco_cnt;
2216 break;
2217 case LE_LINK:
2218 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2219 break;
2220 default:
2221 cnt = 0;
2222 BT_ERR("Unknown link type");
2223 }
2224
2225 q = cnt / num;
2226 *quote = q ? q : 1;
2227 BT_DBG("chan %p quote %d", chan, *quote);
2228 return chan;
2229 }
2230
2231 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2232 {
2233 struct hci_conn_hash *h = &hdev->conn_hash;
2234 struct hci_conn *conn;
2235 int num = 0;
2236
2237 BT_DBG("%s", hdev->name);
2238
2239 rcu_read_lock();
2240
2241 list_for_each_entry_rcu(conn, &h->list, list) {
2242 struct hci_chan *chan;
2243
2244 if (conn->type != type)
2245 continue;
2246
2247 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2248 continue;
2249
2250 num++;
2251
2252 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2253 struct sk_buff *skb;
2254
2255 if (chan->sent) {
2256 chan->sent = 0;
2257 continue;
2258 }
2259
2260 if (skb_queue_empty(&chan->data_q))
2261 continue;
2262
2263 skb = skb_peek(&chan->data_q);
2264 if (skb->priority >= HCI_PRIO_MAX - 1)
2265 continue;
2266
2267 skb->priority = HCI_PRIO_MAX - 1;
2268
2269 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2270 skb->priority);
2271 }
2272
2273 if (hci_conn_num(hdev, type) == num)
2274 break;
2275 }
2276
2277 rcu_read_unlock();
2278
2279 }
2280
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

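/* Schedule eSCO: identical round-robin to SCO above; eSCO links draw from
 * the same controller buffer pool (hdev->sco_cnt).
 */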
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

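/* Schedule LE: mirrors ACL scheduling, but draws credits from a dedicated
 * LE buffer pool when the controller advertises one (hdev->le_pkts) and
 * falls back to the shared ACL pool otherwise.
 */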
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

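/* TX work item: scheduled whenever frames are queued for transmission or
 * the controller frees buffers.  It runs the per-type schedulers and then
 * flushes any raw (unknown type) packets unconditionally.
 */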
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
							handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	BT_ERR("%s ACL packet for unknown connection handle %d",
		hdev->name, handle);

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	}

	BT_ERR("%s SCO packet for unknown connection handle %d",
		hdev->name, handle);

	kfree_skb(skb);
}

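/* RX work item: drains hdev->rx_q.  Each frame is first copied to any
 * listening sockets in promiscuous mode, consumed unprocessed in raw mode,
 * and data packets are dropped while the device is still initializing;
 * everything else is dispatched by packet type below.
 */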
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

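/* Command work item: HCI allows only a limited number of outstanding
 * commands (tracked by hdev->cmd_cnt, normally one), so commands are
 * serialized through cmd_q and sent from here.
 */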
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

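		/* Keep a clone of the command so the event handlers can
		 * inspect its parameters when the matching Command
		 * Complete/Status event arrives; if cloning fails, requeue
		 * the command and let the work queue retry.
		 */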
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

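/* Kick off a General Inquiry.  The LAP below is the General Inquiry Access
 * Code 0x9e8b33 in little-endian byte order, and length is the inquiry
 * duration in units of 1.28 seconds as defined by the HCI specification.
 */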
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

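/* Cancel a running inquiry; returns -EPERM if none is in progress. */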
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, int, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");