Bluetooth: Move Extended Inquiry Response defines to hci.h
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
56 #define AUTO_OFF_TIMEOUT 2000
57
58 int enable_hs;
59
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
67
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
71
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86
87 static void hci_notify(struct hci_dev *hdev, int event)
88 {
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 }
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98 	/* If this is the init phase, check whether the completed command matches
99 	 * the last init command; if not, just return.
100 	 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 return;
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109 }
110
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
112 {
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120 }
121
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
125 {
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
146 err = -bt_to_errno(hdev->req_result);
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
156 }
157
158 hdev->req_status = hdev->req_result = 0;
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163 }
164
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
167 {
168 int ret;
169
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179 }
180
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182 {
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189
190 static void bredr_init(struct hci_dev *hdev)
191 {
192 struct hci_cp_delete_stored_link_key cp;
193 __le16 param;
194 __u8 flt_type;
195
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
198 /* Mandatory initialization */
199
200 /* Reset */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
204 }
205
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
208
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
214
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
223
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
232
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
240 }
241
242 static void amp_init(struct hci_dev *hdev)
243 {
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251 }
252
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254 {
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285 }
286
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288 {
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293 }
294
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303 }
304
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306 {
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
313 }
314
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316 {
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
321 /* Encryption */
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
323 }
324
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326 {
327 __le16 policy = cpu_to_le16(opt);
328
329 BT_DBG("%s %x", hdev->name, policy);
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333 }
334
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
338 {
339 struct hci_dev *hdev = NULL, *d;
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355 }
356
357 /* ---- Inquiry support ---- */
358 static void inquiry_cache_flush(struct hci_dev *hdev)
359 {
360 struct inquiry_entry *p, *n;
361
362 list_for_each_entry_safe(p, n, &hdev->inq_cache.list, list) {
363 list_del(&p->list);
364 kfree(p);
365 }
366 }
367
368 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
369 {
370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_entry *e;
372
373 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
374
375 list_for_each_entry(e, &cache->list, list) {
376 if (!bacmp(&e->data.bdaddr, bdaddr))
377 return e;
378 }
379
380 return NULL;
381 }
382
383 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
384 {
385 struct inquiry_cache *cache = &hdev->inq_cache;
386 struct inquiry_entry *ie;
387
388 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
389
390 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
391 if (!ie) {
392 /* Entry not in the cache. Add new one. */
393 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
394 if (!ie)
395 return;
396
397 list_add(&ie->list, &cache->list);
398 }
399
400 memcpy(&ie->data, data, sizeof(*data));
401 ie->timestamp = jiffies;
402 cache->timestamp = jiffies;
403 }
404
405 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
406 {
407 struct inquiry_cache *cache = &hdev->inq_cache;
408 struct inquiry_info *info = (struct inquiry_info *) buf;
409 struct inquiry_entry *e;
410 int copied = 0;
411
412 list_for_each_entry(e, &cache->list, list) {
413 struct inquiry_data *data = &e->data;
414
415 if (copied >= num)
416 break;
417
418 bacpy(&info->bdaddr, &data->bdaddr);
419 info->pscan_rep_mode = data->pscan_rep_mode;
420 info->pscan_period_mode = data->pscan_period_mode;
421 info->pscan_mode = data->pscan_mode;
422 memcpy(info->dev_class, data->dev_class, 3);
423 info->clock_offset = data->clock_offset;
424
425 info++;
426 copied++;
427 }
428
429 BT_DBG("cache %p, copied %d", cache, copied);
430 return copied;
431 }
432
433 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
434 {
435 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
436 struct hci_cp_inquiry cp;
437
438 BT_DBG("%s", hdev->name);
439
440 if (test_bit(HCI_INQUIRY, &hdev->flags))
441 return;
442
443 /* Start Inquiry */
444 memcpy(&cp.lap, &ir->lap, 3);
445 cp.length = ir->length;
446 cp.num_rsp = ir->num_rsp;
447 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
448 }
449
450 int hci_inquiry(void __user *arg)
451 {
452 __u8 __user *ptr = arg;
453 struct hci_inquiry_req ir;
454 struct hci_dev *hdev;
455 int err = 0, do_inquiry = 0, max_rsp;
456 long timeo;
457 __u8 *buf;
458
459 if (copy_from_user(&ir, ptr, sizeof(ir)))
460 return -EFAULT;
461
462 hdev = hci_dev_get(ir.dev_id);
463 if (!hdev)
464 return -ENODEV;
465
466 hci_dev_lock(hdev);
467 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
468 inquiry_cache_empty(hdev) ||
469 ir.flags & IREQ_CACHE_FLUSH) {
470 inquiry_cache_flush(hdev);
471 do_inquiry = 1;
472 }
473 hci_dev_unlock(hdev);
474
475 timeo = ir.length * msecs_to_jiffies(2000);
476
477 if (do_inquiry) {
478 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
479 if (err < 0)
480 goto done;
481 }
482
483 	/* For an unlimited number of responses, use a buffer with 255 entries */
484 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
485
486 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
487 	 * copy it to user space.
488 	 */
489 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
490 if (!buf) {
491 err = -ENOMEM;
492 goto done;
493 }
494
495 hci_dev_lock(hdev);
496 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
497 hci_dev_unlock(hdev);
498
499 BT_DBG("num_rsp %d", ir.num_rsp);
500
501 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
502 ptr += sizeof(ir);
503 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
504 ir.num_rsp))
505 err = -EFAULT;
506 } else
507 err = -EFAULT;
508
509 kfree(buf);
510
511 done:
512 hci_dev_put(hdev);
513 return err;
514 }
515
516 /* ---- HCI ioctl helpers ---- */
517
518 int hci_dev_open(__u16 dev)
519 {
520 struct hci_dev *hdev;
521 int ret = 0;
522
523 hdev = hci_dev_get(dev);
524 if (!hdev)
525 return -ENODEV;
526
527 BT_DBG("%s %p", hdev->name, hdev);
528
529 hci_req_lock(hdev);
530
531 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
532 ret = -ERFKILL;
533 goto done;
534 }
535
536 if (test_bit(HCI_UP, &hdev->flags)) {
537 ret = -EALREADY;
538 goto done;
539 }
540
541 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
542 set_bit(HCI_RAW, &hdev->flags);
543
544 	/* Treat all non-BR/EDR controllers as raw devices if
545 	   enable_hs is not set */
546 if (hdev->dev_type != HCI_BREDR && !enable_hs)
547 set_bit(HCI_RAW, &hdev->flags);
548
549 if (hdev->open(hdev)) {
550 ret = -EIO;
551 goto done;
552 }
553
554 if (!test_bit(HCI_RAW, &hdev->flags)) {
555 atomic_set(&hdev->cmd_cnt, 1);
556 set_bit(HCI_INIT, &hdev->flags);
557 hdev->init_last_cmd = 0;
558
559 ret = __hci_request(hdev, hci_init_req, 0,
560 msecs_to_jiffies(HCI_INIT_TIMEOUT));
561
562 if (lmp_host_le_capable(hdev))
563 ret = __hci_request(hdev, hci_le_init_req, 0,
564 msecs_to_jiffies(HCI_INIT_TIMEOUT));
565
566 clear_bit(HCI_INIT, &hdev->flags);
567 }
568
569 if (!ret) {
570 hci_dev_hold(hdev);
571 set_bit(HCI_UP, &hdev->flags);
572 hci_notify(hdev, HCI_DEV_UP);
573 if (!test_bit(HCI_SETUP, &hdev->flags)) {
574 hci_dev_lock(hdev);
575 mgmt_powered(hdev, 1);
576 hci_dev_unlock(hdev);
577 }
578 } else {
579 /* Init failed, cleanup */
580 flush_work(&hdev->tx_work);
581 flush_work(&hdev->cmd_work);
582 flush_work(&hdev->rx_work);
583
584 skb_queue_purge(&hdev->cmd_q);
585 skb_queue_purge(&hdev->rx_q);
586
587 if (hdev->flush)
588 hdev->flush(hdev);
589
590 if (hdev->sent_cmd) {
591 kfree_skb(hdev->sent_cmd);
592 hdev->sent_cmd = NULL;
593 }
594
595 hdev->close(hdev);
596 hdev->flags = 0;
597 }
598
599 done:
600 hci_req_unlock(hdev);
601 hci_dev_put(hdev);
602 return ret;
603 }
604
605 static int hci_dev_do_close(struct hci_dev *hdev)
606 {
607 BT_DBG("%s %p", hdev->name, hdev);
608
609 hci_req_cancel(hdev, ENODEV);
610 hci_req_lock(hdev);
611
612 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
613 del_timer_sync(&hdev->cmd_timer);
614 hci_req_unlock(hdev);
615 return 0;
616 }
617
618 /* Flush RX and TX works */
619 flush_work(&hdev->tx_work);
620 flush_work(&hdev->rx_work);
621
622 if (hdev->discov_timeout > 0) {
623 cancel_delayed_work(&hdev->discov_off);
624 hdev->discov_timeout = 0;
625 }
626
627 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
628 cancel_delayed_work(&hdev->power_off);
629
630 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
631 cancel_delayed_work(&hdev->service_cache);
632
633 hci_dev_lock(hdev);
634 inquiry_cache_flush(hdev);
635 hci_conn_hash_flush(hdev);
636 hci_dev_unlock(hdev);
637
638 hci_notify(hdev, HCI_DEV_DOWN);
639
640 if (hdev->flush)
641 hdev->flush(hdev);
642
643 /* Reset device */
644 skb_queue_purge(&hdev->cmd_q);
645 atomic_set(&hdev->cmd_cnt, 1);
646 if (!test_bit(HCI_RAW, &hdev->flags)) {
647 set_bit(HCI_INIT, &hdev->flags);
648 __hci_request(hdev, hci_reset_req, 0,
649 msecs_to_jiffies(250));
650 clear_bit(HCI_INIT, &hdev->flags);
651 }
652
653 /* flush cmd work */
654 flush_work(&hdev->cmd_work);
655
656 /* Drop queues */
657 skb_queue_purge(&hdev->rx_q);
658 skb_queue_purge(&hdev->cmd_q);
659 skb_queue_purge(&hdev->raw_q);
660
661 /* Drop last sent command */
662 if (hdev->sent_cmd) {
663 del_timer_sync(&hdev->cmd_timer);
664 kfree_skb(hdev->sent_cmd);
665 hdev->sent_cmd = NULL;
666 }
667
668 /* After this point our queues are empty
669 * and no tasks are scheduled. */
670 hdev->close(hdev);
671
672 hci_dev_lock(hdev);
673 mgmt_powered(hdev, 0);
674 hci_dev_unlock(hdev);
675
676 /* Clear flags */
677 hdev->flags = 0;
678
679 hci_req_unlock(hdev);
680
681 hci_dev_put(hdev);
682 return 0;
683 }
684
685 int hci_dev_close(__u16 dev)
686 {
687 struct hci_dev *hdev;
688 int err;
689
690 hdev = hci_dev_get(dev);
691 if (!hdev)
692 return -ENODEV;
693 err = hci_dev_do_close(hdev);
694 hci_dev_put(hdev);
695 return err;
696 }
697
698 int hci_dev_reset(__u16 dev)
699 {
700 struct hci_dev *hdev;
701 int ret = 0;
702
703 hdev = hci_dev_get(dev);
704 if (!hdev)
705 return -ENODEV;
706
707 hci_req_lock(hdev);
708
709 if (!test_bit(HCI_UP, &hdev->flags))
710 goto done;
711
712 /* Drop queues */
713 skb_queue_purge(&hdev->rx_q);
714 skb_queue_purge(&hdev->cmd_q);
715
716 hci_dev_lock(hdev);
717 inquiry_cache_flush(hdev);
718 hci_conn_hash_flush(hdev);
719 hci_dev_unlock(hdev);
720
721 if (hdev->flush)
722 hdev->flush(hdev);
723
724 atomic_set(&hdev->cmd_cnt, 1);
725 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
726
727 if (!test_bit(HCI_RAW, &hdev->flags))
728 ret = __hci_request(hdev, hci_reset_req, 0,
729 msecs_to_jiffies(HCI_INIT_TIMEOUT));
730
731 done:
732 hci_req_unlock(hdev);
733 hci_dev_put(hdev);
734 return ret;
735 }
736
737 int hci_dev_reset_stat(__u16 dev)
738 {
739 struct hci_dev *hdev;
740 int ret = 0;
741
742 hdev = hci_dev_get(dev);
743 if (!hdev)
744 return -ENODEV;
745
746 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
747
748 hci_dev_put(hdev);
749
750 return ret;
751 }
752
753 int hci_dev_cmd(unsigned int cmd, void __user *arg)
754 {
755 struct hci_dev *hdev;
756 struct hci_dev_req dr;
757 int err = 0;
758
759 if (copy_from_user(&dr, arg, sizeof(dr)))
760 return -EFAULT;
761
762 hdev = hci_dev_get(dr.dev_id);
763 if (!hdev)
764 return -ENODEV;
765
766 switch (cmd) {
767 case HCISETAUTH:
768 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
769 msecs_to_jiffies(HCI_INIT_TIMEOUT));
770 break;
771
772 case HCISETENCRYPT:
773 if (!lmp_encrypt_capable(hdev)) {
774 err = -EOPNOTSUPP;
775 break;
776 }
777
778 if (!test_bit(HCI_AUTH, &hdev->flags)) {
779 /* Auth must be enabled first */
780 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
781 msecs_to_jiffies(HCI_INIT_TIMEOUT));
782 if (err)
783 break;
784 }
785
786 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
787 msecs_to_jiffies(HCI_INIT_TIMEOUT));
788 break;
789
790 case HCISETSCAN:
791 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
792 msecs_to_jiffies(HCI_INIT_TIMEOUT));
793 break;
794
795 case HCISETLINKPOL:
796 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
797 msecs_to_jiffies(HCI_INIT_TIMEOUT));
798 break;
799
800 case HCISETLINKMODE:
801 hdev->link_mode = ((__u16) dr.dev_opt) &
802 (HCI_LM_MASTER | HCI_LM_ACCEPT);
803 break;
804
805 case HCISETPTYPE:
806 hdev->pkt_type = (__u16) dr.dev_opt;
807 break;
808
809 case HCISETACLMTU:
810 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
811 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
812 break;
813
814 case HCISETSCOMTU:
815 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
816 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
817 break;
818
819 default:
820 err = -EINVAL;
821 break;
822 }
823
824 hci_dev_put(hdev);
825 return err;
826 }
827
828 int hci_get_dev_list(void __user *arg)
829 {
830 struct hci_dev *hdev;
831 struct hci_dev_list_req *dl;
832 struct hci_dev_req *dr;
833 int n = 0, size, err;
834 __u16 dev_num;
835
836 if (get_user(dev_num, (__u16 __user *) arg))
837 return -EFAULT;
838
839 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
840 return -EINVAL;
841
842 size = sizeof(*dl) + dev_num * sizeof(*dr);
843
844 dl = kzalloc(size, GFP_KERNEL);
845 if (!dl)
846 return -ENOMEM;
847
848 dr = dl->dev_req;
849
850 read_lock(&hci_dev_list_lock);
851 list_for_each_entry(hdev, &hci_dev_list, list) {
852 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
853 cancel_delayed_work(&hdev->power_off);
854
855 if (!test_bit(HCI_MGMT, &hdev->flags))
856 set_bit(HCI_PAIRABLE, &hdev->flags);
857
858 (dr + n)->dev_id = hdev->id;
859 (dr + n)->dev_opt = hdev->flags;
860
861 if (++n >= dev_num)
862 break;
863 }
864 read_unlock(&hci_dev_list_lock);
865
866 dl->dev_num = n;
867 size = sizeof(*dl) + n * sizeof(*dr);
868
869 err = copy_to_user(arg, dl, size);
870 kfree(dl);
871
872 return err ? -EFAULT : 0;
873 }
874
875 int hci_get_dev_info(void __user *arg)
876 {
877 struct hci_dev *hdev;
878 struct hci_dev_info di;
879 int err = 0;
880
881 if (copy_from_user(&di, arg, sizeof(di)))
882 return -EFAULT;
883
884 hdev = hci_dev_get(di.dev_id);
885 if (!hdev)
886 return -ENODEV;
887
888 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
889 cancel_delayed_work_sync(&hdev->power_off);
890
891 if (!test_bit(HCI_MGMT, &hdev->flags))
892 set_bit(HCI_PAIRABLE, &hdev->flags);
893
894 strcpy(di.name, hdev->name);
895 di.bdaddr = hdev->bdaddr;
896 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
897 di.flags = hdev->flags;
898 di.pkt_type = hdev->pkt_type;
899 di.acl_mtu = hdev->acl_mtu;
900 di.acl_pkts = hdev->acl_pkts;
901 di.sco_mtu = hdev->sco_mtu;
902 di.sco_pkts = hdev->sco_pkts;
903 di.link_policy = hdev->link_policy;
904 di.link_mode = hdev->link_mode;
905
906 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
907 memcpy(&di.features, &hdev->features, sizeof(di.features));
908
909 if (copy_to_user(arg, &di, sizeof(di)))
910 err = -EFAULT;
911
912 hci_dev_put(hdev);
913
914 return err;
915 }
916
917 /* ---- Interface to HCI drivers ---- */
918
919 static int hci_rfkill_set_block(void *data, bool blocked)
920 {
921 struct hci_dev *hdev = data;
922
923 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
924
925 if (!blocked)
926 return 0;
927
928 hci_dev_do_close(hdev);
929
930 return 0;
931 }
932
933 static const struct rfkill_ops hci_rfkill_ops = {
934 .set_block = hci_rfkill_set_block,
935 };
936
937 /* Alloc HCI device */
938 struct hci_dev *hci_alloc_dev(void)
939 {
940 struct hci_dev *hdev;
941
942 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
943 if (!hdev)
944 return NULL;
945
946 hci_init_sysfs(hdev);
947 skb_queue_head_init(&hdev->driver_init);
948
949 return hdev;
950 }
951 EXPORT_SYMBOL(hci_alloc_dev);
952
953 /* Free HCI device */
954 void hci_free_dev(struct hci_dev *hdev)
955 {
956 skb_queue_purge(&hdev->driver_init);
957
958 	/* Will be freed via the device release callback */
959 put_device(&hdev->dev);
960 }
961 EXPORT_SYMBOL(hci_free_dev);
962
963 static void hci_power_on(struct work_struct *work)
964 {
965 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
966
967 BT_DBG("%s", hdev->name);
968
969 if (hci_dev_open(hdev->id) < 0)
970 return;
971
972 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
973 schedule_delayed_work(&hdev->power_off,
974 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
975
976 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
977 mgmt_index_added(hdev);
978 }
979
980 static void hci_power_off(struct work_struct *work)
981 {
982 struct hci_dev *hdev = container_of(work, struct hci_dev,
983 power_off.work);
984
985 BT_DBG("%s", hdev->name);
986
987 clear_bit(HCI_AUTO_OFF, &hdev->flags);
988
989 hci_dev_close(hdev->id);
990 }
991
992 static void hci_discov_off(struct work_struct *work)
993 {
994 struct hci_dev *hdev;
995 u8 scan = SCAN_PAGE;
996
997 hdev = container_of(work, struct hci_dev, discov_off.work);
998
999 BT_DBG("%s", hdev->name);
1000
1001 hci_dev_lock(hdev);
1002
1003 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1004
1005 hdev->discov_timeout = 0;
1006
1007 hci_dev_unlock(hdev);
1008 }
1009
1010 int hci_uuids_clear(struct hci_dev *hdev)
1011 {
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->uuids) {
1015 struct bt_uuid *uuid;
1016
1017 uuid = list_entry(p, struct bt_uuid, list);
1018
1019 list_del(p);
1020 kfree(uuid);
1021 }
1022
1023 return 0;
1024 }
1025
1026 int hci_link_keys_clear(struct hci_dev *hdev)
1027 {
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->link_keys) {
1031 struct link_key *key;
1032
1033 key = list_entry(p, struct link_key, list);
1034
1035 list_del(p);
1036 kfree(key);
1037 }
1038
1039 return 0;
1040 }
1041
1042 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1043 {
1044 struct link_key *k;
1045
1046 list_for_each_entry(k, &hdev->link_keys, list)
1047 if (bacmp(bdaddr, &k->bdaddr) == 0)
1048 return k;
1049
1050 return NULL;
1051 }
1052
1053 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1054 u8 key_type, u8 old_key_type)
1055 {
1056 /* Legacy key */
1057 if (key_type < 0x03)
1058 return 1;
1059
1060 /* Debug keys are insecure so don't store them persistently */
1061 if (key_type == HCI_LK_DEBUG_COMBINATION)
1062 return 0;
1063
1064 /* Changed combination key and there's no previous one */
1065 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1066 return 0;
1067
1068 /* Security mode 3 case */
1069 if (!conn)
1070 return 1;
1071
1072 /* Neither local nor remote side had no-bonding as requirement */
1073 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1074 return 1;
1075
1076 /* Local side had dedicated bonding as requirement */
1077 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1078 return 1;
1079
1080 /* Remote side had dedicated bonding as requirement */
1081 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1082 return 1;
1083
1084 /* If none of the above criteria match, then don't store the key
1085 * persistently */
1086 return 0;
1087 }
1088
1089 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1090 {
1091 struct link_key *k;
1092
1093 list_for_each_entry(k, &hdev->link_keys, list) {
1094 struct key_master_id *id;
1095
1096 if (k->type != HCI_LK_SMP_LTK)
1097 continue;
1098
1099 if (k->dlen != sizeof(*id))
1100 continue;
1101
1102 id = (void *) &k->data;
1103 if (id->ediv == ediv &&
1104 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1105 return k;
1106 }
1107
1108 return NULL;
1109 }
1110 EXPORT_SYMBOL(hci_find_ltk);
1111
1112 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1113 bdaddr_t *bdaddr, u8 type)
1114 {
1115 struct link_key *k;
1116
1117 list_for_each_entry(k, &hdev->link_keys, list)
1118 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1119 return k;
1120
1121 return NULL;
1122 }
1123 EXPORT_SYMBOL(hci_find_link_key_type);
1124
1125 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1126 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1127 {
1128 struct link_key *key, *old_key;
1129 u8 old_key_type, persistent;
1130
1131 old_key = hci_find_link_key(hdev, bdaddr);
1132 if (old_key) {
1133 old_key_type = old_key->type;
1134 key = old_key;
1135 } else {
1136 old_key_type = conn ? conn->key_type : 0xff;
1137 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1138 if (!key)
1139 return -ENOMEM;
1140 list_add(&key->list, &hdev->link_keys);
1141 }
1142
1143 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1144
1145 /* Some buggy controller combinations generate a changed
1146 * combination key for legacy pairing even when there's no
1147 * previous key */
1148 if (type == HCI_LK_CHANGED_COMBINATION &&
1149 (!conn || conn->remote_auth == 0xff) &&
1150 old_key_type == 0xff) {
1151 type = HCI_LK_COMBINATION;
1152 if (conn)
1153 conn->key_type = type;
1154 }
1155
1156 bacpy(&key->bdaddr, bdaddr);
1157 memcpy(key->val, val, 16);
1158 key->pin_len = pin_len;
1159
1160 if (type == HCI_LK_CHANGED_COMBINATION)
1161 key->type = old_key_type;
1162 else
1163 key->type = type;
1164
1165 if (!new_key)
1166 return 0;
1167
1168 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1169
1170 mgmt_new_link_key(hdev, key, persistent);
1171
1172 if (!persistent) {
1173 list_del(&key->list);
1174 kfree(key);
1175 }
1176
1177 return 0;
1178 }
1179
1180 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1181 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1182 {
1183 struct link_key *key, *old_key;
1184 struct key_master_id *id;
1185 u8 old_key_type;
1186
1187 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1188
1189 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1190 if (old_key) {
1191 key = old_key;
1192 old_key_type = old_key->type;
1193 } else {
1194 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1195 if (!key)
1196 return -ENOMEM;
1197 list_add(&key->list, &hdev->link_keys);
1198 old_key_type = 0xff;
1199 }
1200
1201 key->dlen = sizeof(*id);
1202
1203 bacpy(&key->bdaddr, bdaddr);
1204 memcpy(key->val, ltk, sizeof(key->val));
1205 key->type = HCI_LK_SMP_LTK;
1206 key->pin_len = key_size;
1207
1208 id = (void *) &key->data;
1209 id->ediv = ediv;
1210 memcpy(id->rand, rand, sizeof(id->rand));
1211
1212 if (new_key)
1213 mgmt_new_link_key(hdev, key, old_key_type);
1214
1215 return 0;
1216 }
1217
1218 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1219 {
1220 struct link_key *key;
1221
1222 key = hci_find_link_key(hdev, bdaddr);
1223 if (!key)
1224 return -ENOENT;
1225
1226 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1227
1228 list_del(&key->list);
1229 kfree(key);
1230
1231 return 0;
1232 }
1233
1234 /* HCI command timer function */
1235 static void hci_cmd_timer(unsigned long arg)
1236 {
1237 struct hci_dev *hdev = (void *) arg;
1238
1239 BT_ERR("%s command tx timeout", hdev->name);
1240 atomic_set(&hdev->cmd_cnt, 1);
1241 queue_work(hdev->workqueue, &hdev->cmd_work);
1242 }
1243
1244 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1245 bdaddr_t *bdaddr)
1246 {
1247 struct oob_data *data;
1248
1249 list_for_each_entry(data, &hdev->remote_oob_data, list)
1250 if (bacmp(bdaddr, &data->bdaddr) == 0)
1251 return data;
1252
1253 return NULL;
1254 }
1255
1256 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1257 {
1258 struct oob_data *data;
1259
1260 data = hci_find_remote_oob_data(hdev, bdaddr);
1261 if (!data)
1262 return -ENOENT;
1263
1264 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1265
1266 list_del(&data->list);
1267 kfree(data);
1268
1269 return 0;
1270 }
1271
1272 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1273 {
1274 struct oob_data *data, *n;
1275
1276 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1277 list_del(&data->list);
1278 kfree(data);
1279 }
1280
1281 return 0;
1282 }
1283
1284 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1285 u8 *randomizer)
1286 {
1287 struct oob_data *data;
1288
1289 data = hci_find_remote_oob_data(hdev, bdaddr);
1290
1291 if (!data) {
1292 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1293 if (!data)
1294 return -ENOMEM;
1295
1296 bacpy(&data->bdaddr, bdaddr);
1297 list_add(&data->list, &hdev->remote_oob_data);
1298 }
1299
1300 memcpy(data->hash, hash, sizeof(data->hash));
1301 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1302
1303 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1304
1305 return 0;
1306 }
1307
1308 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1309 bdaddr_t *bdaddr)
1310 {
1311 struct bdaddr_list *b;
1312
1313 list_for_each_entry(b, &hdev->blacklist, list)
1314 if (bacmp(bdaddr, &b->bdaddr) == 0)
1315 return b;
1316
1317 return NULL;
1318 }
1319
1320 int hci_blacklist_clear(struct hci_dev *hdev)
1321 {
1322 struct list_head *p, *n;
1323
1324 list_for_each_safe(p, n, &hdev->blacklist) {
1325 struct bdaddr_list *b;
1326
1327 b = list_entry(p, struct bdaddr_list, list);
1328
1329 list_del(p);
1330 kfree(b);
1331 }
1332
1333 return 0;
1334 }
1335
1336 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1337 {
1338 struct bdaddr_list *entry;
1339
1340 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1341 return -EBADF;
1342
1343 if (hci_blacklist_lookup(hdev, bdaddr))
1344 return -EEXIST;
1345
1346 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1347 if (!entry)
1348 return -ENOMEM;
1349
1350 bacpy(&entry->bdaddr, bdaddr);
1351
1352 list_add(&entry->list, &hdev->blacklist);
1353
1354 return mgmt_device_blocked(hdev, bdaddr);
1355 }
1356
1357 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1358 {
1359 struct bdaddr_list *entry;
1360
1361 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1362 return hci_blacklist_clear(hdev);
1363
1364 entry = hci_blacklist_lookup(hdev, bdaddr);
1365 if (!entry)
1366 return -ENOENT;
1367
1368 list_del(&entry->list);
1369 kfree(entry);
1370
1371 return mgmt_device_unblocked(hdev, bdaddr);
1372 }
1373
1374 static void hci_clear_adv_cache(struct work_struct *work)
1375 {
1376 struct hci_dev *hdev = container_of(work, struct hci_dev,
1377 adv_work.work);
1378
1379 hci_dev_lock(hdev);
1380
1381 hci_adv_entries_clear(hdev);
1382
1383 hci_dev_unlock(hdev);
1384 }
1385
1386 int hci_adv_entries_clear(struct hci_dev *hdev)
1387 {
1388 struct adv_entry *entry, *tmp;
1389
1390 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1391 list_del(&entry->list);
1392 kfree(entry);
1393 }
1394
1395 BT_DBG("%s adv cache cleared", hdev->name);
1396
1397 return 0;
1398 }
1399
1400 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1401 {
1402 struct adv_entry *entry;
1403
1404 list_for_each_entry(entry, &hdev->adv_entries, list)
1405 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1406 return entry;
1407
1408 return NULL;
1409 }
1410
1411 static inline int is_connectable_adv(u8 evt_type)
1412 {
1413 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1414 return 1;
1415
1416 return 0;
1417 }
1418
1419 int hci_add_adv_entry(struct hci_dev *hdev,
1420 struct hci_ev_le_advertising_info *ev)
1421 {
1422 struct adv_entry *entry;
1423
1424 if (!is_connectable_adv(ev->evt_type))
1425 return -EINVAL;
1426
1427 /* Only new entries should be added to adv_entries. So, if
1428 * bdaddr was found, don't add it. */
1429 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1430 return 0;
1431
1432 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1433 if (!entry)
1434 return -ENOMEM;
1435
1436 bacpy(&entry->bdaddr, &ev->bdaddr);
1437 entry->bdaddr_type = ev->bdaddr_type;
1438
1439 list_add(&entry->list, &hdev->adv_entries);
1440
1441 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1442 batostr(&entry->bdaddr), entry->bdaddr_type);
1443
1444 return 0;
1445 }
1446
1447 /* Register HCI device */
1448 int hci_register_dev(struct hci_dev *hdev)
1449 {
1450 struct list_head *head = &hci_dev_list, *p;
1451 int i, id, error;
1452
1453 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1454 hdev->bus, hdev->owner);
1455
1456 if (!hdev->open || !hdev->close || !hdev->destruct)
1457 return -EINVAL;
1458
1459 /* Do not allow HCI_AMP devices to register at index 0,
1460 * so the index can be used as the AMP controller ID.
1461 */
1462 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1463
1464 write_lock(&hci_dev_list_lock);
1465
1466 /* Find first available device id */
1467 list_for_each(p, &hci_dev_list) {
1468 if (list_entry(p, struct hci_dev, list)->id != id)
1469 break;
1470 head = p; id++;
1471 }
1472
1473 sprintf(hdev->name, "hci%d", id);
1474 hdev->id = id;
1475 list_add_tail(&hdev->list, head);
1476
1477 atomic_set(&hdev->refcnt, 1);
1478 mutex_init(&hdev->lock);
1479
1480 hdev->flags = 0;
1481 hdev->dev_flags = 0;
1482 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1483 hdev->esco_type = (ESCO_HV1);
1484 hdev->link_mode = (HCI_LM_ACCEPT);
1485 hdev->io_capability = 0x03; /* No Input No Output */
1486
1487 hdev->idle_timeout = 0;
1488 hdev->sniff_max_interval = 800;
1489 hdev->sniff_min_interval = 80;
1490
1491 INIT_WORK(&hdev->rx_work, hci_rx_work);
1492 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1493 INIT_WORK(&hdev->tx_work, hci_tx_work);
1494
1495
1496 skb_queue_head_init(&hdev->rx_q);
1497 skb_queue_head_init(&hdev->cmd_q);
1498 skb_queue_head_init(&hdev->raw_q);
1499
1500 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1501
1502 for (i = 0; i < NUM_REASSEMBLY; i++)
1503 hdev->reassembly[i] = NULL;
1504
1505 init_waitqueue_head(&hdev->req_wait_q);
1506 mutex_init(&hdev->req_lock);
1507
1508 inquiry_cache_init(hdev);
1509
1510 hci_conn_hash_init(hdev);
1511
1512 INIT_LIST_HEAD(&hdev->mgmt_pending);
1513
1514 INIT_LIST_HEAD(&hdev->blacklist);
1515
1516 INIT_LIST_HEAD(&hdev->uuids);
1517
1518 INIT_LIST_HEAD(&hdev->link_keys);
1519
1520 INIT_LIST_HEAD(&hdev->remote_oob_data);
1521
1522 INIT_LIST_HEAD(&hdev->adv_entries);
1523
1524 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1525 INIT_WORK(&hdev->power_on, hci_power_on);
1526 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1527
1528 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1529
1530 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1531
1532 atomic_set(&hdev->promisc, 0);
1533
1534 write_unlock(&hci_dev_list_lock);
1535
1536 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1537 WQ_MEM_RECLAIM, 1);
1538 if (!hdev->workqueue) {
1539 error = -ENOMEM;
1540 goto err;
1541 }
1542
1543 error = hci_add_sysfs(hdev);
1544 if (error < 0)
1545 goto err_wqueue;
1546
1547 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1548 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1549 if (hdev->rfkill) {
1550 if (rfkill_register(hdev->rfkill) < 0) {
1551 rfkill_destroy(hdev->rfkill);
1552 hdev->rfkill = NULL;
1553 }
1554 }
1555
1556 set_bit(HCI_AUTO_OFF, &hdev->flags);
1557 set_bit(HCI_SETUP, &hdev->flags);
1558 schedule_work(&hdev->power_on);
1559
1560 hci_notify(hdev, HCI_DEV_REG);
1561
1562 return id;
1563
1564 err_wqueue:
1565 destroy_workqueue(hdev->workqueue);
1566 err:
1567 write_lock(&hci_dev_list_lock);
1568 list_del(&hdev->list);
1569 write_unlock(&hci_dev_list_lock);
1570
1571 return error;
1572 }
1573 EXPORT_SYMBOL(hci_register_dev);
1574
1575 /* Unregister HCI device */
1576 void hci_unregister_dev(struct hci_dev *hdev)
1577 {
1578 int i;
1579
1580 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1581
1582 write_lock(&hci_dev_list_lock);
1583 list_del(&hdev->list);
1584 write_unlock(&hci_dev_list_lock);
1585
1586 hci_dev_do_close(hdev);
1587
1588 for (i = 0; i < NUM_REASSEMBLY; i++)
1589 kfree_skb(hdev->reassembly[i]);
1590
1591 if (!test_bit(HCI_INIT, &hdev->flags) &&
1592 !test_bit(HCI_SETUP, &hdev->flags)) {
1593 hci_dev_lock(hdev);
1594 mgmt_index_removed(hdev);
1595 hci_dev_unlock(hdev);
1596 }
1597
1598 /* mgmt_index_removed should take care of emptying the
1599 * pending list */
1600 BUG_ON(!list_empty(&hdev->mgmt_pending));
1601
1602 hci_notify(hdev, HCI_DEV_UNREG);
1603
1604 if (hdev->rfkill) {
1605 rfkill_unregister(hdev->rfkill);
1606 rfkill_destroy(hdev->rfkill);
1607 }
1608
1609 hci_del_sysfs(hdev);
1610
1611 cancel_delayed_work_sync(&hdev->adv_work);
1612
1613 destroy_workqueue(hdev->workqueue);
1614
1615 hci_dev_lock(hdev);
1616 hci_blacklist_clear(hdev);
1617 hci_uuids_clear(hdev);
1618 hci_link_keys_clear(hdev);
1619 hci_remote_oob_data_clear(hdev);
1620 hci_adv_entries_clear(hdev);
1621 hci_dev_unlock(hdev);
1622
1623 __hci_dev_put(hdev);
1624 }
1625 EXPORT_SYMBOL(hci_unregister_dev);
1626
1627 /* Suspend HCI device */
1628 int hci_suspend_dev(struct hci_dev *hdev)
1629 {
1630 hci_notify(hdev, HCI_DEV_SUSPEND);
1631 return 0;
1632 }
1633 EXPORT_SYMBOL(hci_suspend_dev);
1634
1635 /* Resume HCI device */
1636 int hci_resume_dev(struct hci_dev *hdev)
1637 {
1638 hci_notify(hdev, HCI_DEV_RESUME);
1639 return 0;
1640 }
1641 EXPORT_SYMBOL(hci_resume_dev);
1642
1643 /* Receive frame from HCI drivers */
1644 int hci_recv_frame(struct sk_buff *skb)
1645 {
1646 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1647 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1648 && !test_bit(HCI_INIT, &hdev->flags))) {
1649 kfree_skb(skb);
1650 return -ENXIO;
1651 }
1652
1653 	/* Incoming skb */
1654 bt_cb(skb)->incoming = 1;
1655
1656 /* Time stamp */
1657 __net_timestamp(skb);
1658
1659 skb_queue_tail(&hdev->rx_q, skb);
1660 queue_work(hdev->workqueue, &hdev->rx_work);
1661
1662 return 0;
1663 }
1664 EXPORT_SYMBOL(hci_recv_frame);
1665
1666 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1667 int count, __u8 index)
1668 {
1669 int len = 0;
1670 int hlen = 0;
1671 int remain = count;
1672 struct sk_buff *skb;
1673 struct bt_skb_cb *scb;
1674
1675 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1676 index >= NUM_REASSEMBLY)
1677 return -EILSEQ;
1678
1679 skb = hdev->reassembly[index];
1680
1681 if (!skb) {
1682 switch (type) {
1683 case HCI_ACLDATA_PKT:
1684 len = HCI_MAX_FRAME_SIZE;
1685 hlen = HCI_ACL_HDR_SIZE;
1686 break;
1687 case HCI_EVENT_PKT:
1688 len = HCI_MAX_EVENT_SIZE;
1689 hlen = HCI_EVENT_HDR_SIZE;
1690 break;
1691 case HCI_SCODATA_PKT:
1692 len = HCI_MAX_SCO_SIZE;
1693 hlen = HCI_SCO_HDR_SIZE;
1694 break;
1695 }
1696
1697 skb = bt_skb_alloc(len, GFP_ATOMIC);
1698 if (!skb)
1699 return -ENOMEM;
1700
1701 scb = (void *) skb->cb;
1702 scb->expect = hlen;
1703 scb->pkt_type = type;
1704
1705 skb->dev = (void *) hdev;
1706 hdev->reassembly[index] = skb;
1707 }
1708
1709 while (count) {
1710 scb = (void *) skb->cb;
1711 len = min(scb->expect, (__u16)count);
1712
1713 memcpy(skb_put(skb, len), data, len);
1714
1715 count -= len;
1716 data += len;
1717 scb->expect -= len;
1718 remain = count;
1719
1720 switch (type) {
1721 case HCI_EVENT_PKT:
1722 if (skb->len == HCI_EVENT_HDR_SIZE) {
1723 struct hci_event_hdr *h = hci_event_hdr(skb);
1724 scb->expect = h->plen;
1725
1726 if (skb_tailroom(skb) < scb->expect) {
1727 kfree_skb(skb);
1728 hdev->reassembly[index] = NULL;
1729 return -ENOMEM;
1730 }
1731 }
1732 break;
1733
1734 case HCI_ACLDATA_PKT:
1735 if (skb->len == HCI_ACL_HDR_SIZE) {
1736 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1737 scb->expect = __le16_to_cpu(h->dlen);
1738
1739 if (skb_tailroom(skb) < scb->expect) {
1740 kfree_skb(skb);
1741 hdev->reassembly[index] = NULL;
1742 return -ENOMEM;
1743 }
1744 }
1745 break;
1746
1747 case HCI_SCODATA_PKT:
1748 if (skb->len == HCI_SCO_HDR_SIZE) {
1749 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1750 scb->expect = h->dlen;
1751
1752 if (skb_tailroom(skb) < scb->expect) {
1753 kfree_skb(skb);
1754 hdev->reassembly[index] = NULL;
1755 return -ENOMEM;
1756 }
1757 }
1758 break;
1759 }
1760
1761 if (scb->expect == 0) {
1762 /* Complete frame */
1763
1764 bt_cb(skb)->pkt_type = type;
1765 hci_recv_frame(skb);
1766
1767 hdev->reassembly[index] = NULL;
1768 return remain;
1769 }
1770 }
1771
1772 return remain;
1773 }
1774
1775 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1776 {
1777 int rem = 0;
1778
1779 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1780 return -EILSEQ;
1781
1782 while (count) {
1783 rem = hci_reassembly(hdev, type, data, count, type - 1);
1784 if (rem < 0)
1785 return rem;
1786
1787 data += (count - rem);
1788 count = rem;
1789 }
1790
1791 return rem;
1792 }
1793 EXPORT_SYMBOL(hci_recv_fragment);
1794
1795 #define STREAM_REASSEMBLY 0
1796
1797 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1798 {
1799 int type;
1800 int rem = 0;
1801
1802 while (count) {
1803 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1804
1805 if (!skb) {
1806 struct { char type; } *pkt;
1807
1808 /* Start of the frame */
1809 pkt = data;
1810 type = pkt->type;
1811
1812 data++;
1813 count--;
1814 } else
1815 type = bt_cb(skb)->pkt_type;
1816
1817 rem = hci_reassembly(hdev, type, data, count,
1818 STREAM_REASSEMBLY);
1819 if (rem < 0)
1820 return rem;
1821
1822 data += (count - rem);
1823 count = rem;
1824 }
1825
1826 return rem;
1827 }
1828 EXPORT_SYMBOL(hci_recv_stream_fragment);
1829
1830 /* ---- Interface to upper protocols ---- */
1831
1832 int hci_register_cb(struct hci_cb *cb)
1833 {
1834 BT_DBG("%p name %s", cb, cb->name);
1835
1836 write_lock(&hci_cb_list_lock);
1837 list_add(&cb->list, &hci_cb_list);
1838 write_unlock(&hci_cb_list_lock);
1839
1840 return 0;
1841 }
1842 EXPORT_SYMBOL(hci_register_cb);
1843
1844 int hci_unregister_cb(struct hci_cb *cb)
1845 {
1846 BT_DBG("%p name %s", cb, cb->name);
1847
1848 write_lock(&hci_cb_list_lock);
1849 list_del(&cb->list);
1850 write_unlock(&hci_cb_list_lock);
1851
1852 return 0;
1853 }
1854 EXPORT_SYMBOL(hci_unregister_cb);
1855
1856 static int hci_send_frame(struct sk_buff *skb)
1857 {
1858 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1859
1860 if (!hdev) {
1861 kfree_skb(skb);
1862 return -ENODEV;
1863 }
1864
1865 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1866
1867 if (atomic_read(&hdev->promisc)) {
1868 /* Time stamp */
1869 __net_timestamp(skb);
1870
1871 hci_send_to_sock(hdev, skb, NULL);
1872 }
1873
1874 /* Get rid of skb owner, prior to sending to the driver. */
1875 skb_orphan(skb);
1876
1877 return hdev->send(skb);
1878 }
1879
1880 /* Send HCI command */
1881 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1882 {
1883 int len = HCI_COMMAND_HDR_SIZE + plen;
1884 struct hci_command_hdr *hdr;
1885 struct sk_buff *skb;
1886
1887 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1888
1889 skb = bt_skb_alloc(len, GFP_ATOMIC);
1890 if (!skb) {
1891 BT_ERR("%s no memory for command", hdev->name);
1892 return -ENOMEM;
1893 }
1894
1895 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1896 hdr->opcode = cpu_to_le16(opcode);
1897 hdr->plen = plen;
1898
1899 if (plen)
1900 memcpy(skb_put(skb, plen), param, plen);
1901
1902 BT_DBG("skb len %d", skb->len);
1903
1904 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1905 skb->dev = (void *) hdev;
1906
1907 if (test_bit(HCI_INIT, &hdev->flags))
1908 hdev->init_last_cmd = opcode;
1909
1910 skb_queue_tail(&hdev->cmd_q, skb);
1911 queue_work(hdev->workqueue, &hdev->cmd_work);
1912
1913 return 0;
1914 }
1915
1916 /* Get data from the previously sent command */
1917 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1918 {
1919 struct hci_command_hdr *hdr;
1920
1921 if (!hdev->sent_cmd)
1922 return NULL;
1923
1924 hdr = (void *) hdev->sent_cmd->data;
1925
1926 if (hdr->opcode != cpu_to_le16(opcode))
1927 return NULL;
1928
1929 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1930
1931 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1932 }
1933
1934 /* Send ACL data */
1935 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1936 {
1937 struct hci_acl_hdr *hdr;
1938 int len = skb->len;
1939
1940 skb_push(skb, HCI_ACL_HDR_SIZE);
1941 skb_reset_transport_header(skb);
1942 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1943 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1944 hdr->dlen = cpu_to_le16(len);
1945 }
1946
1947 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1948 struct sk_buff *skb, __u16 flags)
1949 {
1950 struct hci_dev *hdev = conn->hdev;
1951 struct sk_buff *list;
1952
1953 list = skb_shinfo(skb)->frag_list;
1954 if (!list) {
1955 /* Non fragmented */
1956 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1957
1958 skb_queue_tail(queue, skb);
1959 } else {
1960 /* Fragmented */
1961 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1962
1963 skb_shinfo(skb)->frag_list = NULL;
1964
1965 /* Queue all fragments atomically */
1966 spin_lock(&queue->lock);
1967
1968 __skb_queue_tail(queue, skb);
1969
1970 flags &= ~ACL_START;
1971 flags |= ACL_CONT;
1972 do {
1973 skb = list; list = list->next;
1974
1975 skb->dev = (void *) hdev;
1976 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1977 hci_add_acl_hdr(skb, conn->handle, flags);
1978
1979 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1980
1981 __skb_queue_tail(queue, skb);
1982 } while (list);
1983
1984 spin_unlock(&queue->lock);
1985 }
1986 }
1987
1988 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1989 {
1990 struct hci_conn *conn = chan->conn;
1991 struct hci_dev *hdev = conn->hdev;
1992
1993 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1994
1995 skb->dev = (void *) hdev;
1996 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1997 hci_add_acl_hdr(skb, conn->handle, flags);
1998
1999 hci_queue_acl(conn, &chan->data_q, skb, flags);
2000
2001 queue_work(hdev->workqueue, &hdev->tx_work);
2002 }
2003 EXPORT_SYMBOL(hci_send_acl);
2004
2005 /* Send SCO data */
2006 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2007 {
2008 struct hci_dev *hdev = conn->hdev;
2009 struct hci_sco_hdr hdr;
2010
2011 BT_DBG("%s len %d", hdev->name, skb->len);
2012
2013 hdr.handle = cpu_to_le16(conn->handle);
2014 hdr.dlen = skb->len;
2015
2016 skb_push(skb, HCI_SCO_HDR_SIZE);
2017 skb_reset_transport_header(skb);
2018 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2019
2020 skb->dev = (void *) hdev;
2021 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2022
2023 skb_queue_tail(&conn->data_q, skb);
2024 queue_work(hdev->workqueue, &hdev->tx_work);
2025 }
2026 EXPORT_SYMBOL(hci_send_sco);
2027
2028 /* ---- HCI TX task (outgoing data) ---- */
2029
2030 /* HCI Connection scheduler */
2031 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2032 {
2033 struct hci_conn_hash *h = &hdev->conn_hash;
2034 struct hci_conn *conn = NULL, *c;
2035 int num = 0, min = ~0;
2036
2037 /* We don't have to lock device here. Connections are always
2038 * added and removed with TX task disabled. */
2039
2040 rcu_read_lock();
2041
2042 list_for_each_entry_rcu(c, &h->list, list) {
2043 if (c->type != type || skb_queue_empty(&c->data_q))
2044 continue;
2045
2046 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2047 continue;
2048
2049 num++;
2050
2051 if (c->sent < min) {
2052 min = c->sent;
2053 conn = c;
2054 }
2055
2056 if (hci_conn_num(hdev, type) == num)
2057 break;
2058 }
2059
2060 rcu_read_unlock();
2061
2062 if (conn) {
2063 int cnt, q;
2064
2065 switch (conn->type) {
2066 case ACL_LINK:
2067 cnt = hdev->acl_cnt;
2068 break;
2069 case SCO_LINK:
2070 case ESCO_LINK:
2071 cnt = hdev->sco_cnt;
2072 break;
2073 case LE_LINK:
2074 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2075 break;
2076 default:
2077 cnt = 0;
2078 BT_ERR("Unknown link type");
2079 }
2080
2081 q = cnt / num;
2082 *quote = q ? q : 1;
2083 } else
2084 *quote = 0;
2085
2086 BT_DBG("conn %p quote %d", conn, *quote);
2087 return conn;
2088 }
2089
2090 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2091 {
2092 struct hci_conn_hash *h = &hdev->conn_hash;
2093 struct hci_conn *c;
2094
2095 BT_ERR("%s link tx timeout", hdev->name);
2096
2097 rcu_read_lock();
2098
2099 /* Kill stalled connections */
2100 list_for_each_entry_rcu(c, &h->list, list) {
2101 if (c->type == type && c->sent) {
2102 BT_ERR("%s killing stalled connection %s",
2103 hdev->name, batostr(&c->dst));
2104 hci_acl_disconn(c, 0x13);
2105 }
2106 }
2107
2108 rcu_read_unlock();
2109 }
2110
2111 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2112 int *quote)
2113 {
2114 struct hci_conn_hash *h = &hdev->conn_hash;
2115 struct hci_chan *chan = NULL;
2116 int num = 0, min = ~0, cur_prio = 0;
2117 struct hci_conn *conn;
2118 int cnt, q, conn_num = 0;
2119
2120 BT_DBG("%s", hdev->name);
2121
2122 rcu_read_lock();
2123
2124 list_for_each_entry_rcu(conn, &h->list, list) {
2125 struct hci_chan *tmp;
2126
2127 if (conn->type != type)
2128 continue;
2129
2130 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2131 continue;
2132
2133 conn_num++;
2134
2135 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2136 struct sk_buff *skb;
2137
2138 if (skb_queue_empty(&tmp->data_q))
2139 continue;
2140
2141 skb = skb_peek(&tmp->data_q);
2142 if (skb->priority < cur_prio)
2143 continue;
2144
2145 if (skb->priority > cur_prio) {
2146 num = 0;
2147 min = ~0;
2148 cur_prio = skb->priority;
2149 }
2150
2151 num++;
2152
2153 if (conn->sent < min) {
2154 min = conn->sent;
2155 chan = tmp;
2156 }
2157 }
2158
2159 if (hci_conn_num(hdev, type) == conn_num)
2160 break;
2161 }
2162
2163 rcu_read_unlock();
2164
2165 if (!chan)
2166 return NULL;
2167
2168 switch (chan->conn->type) {
2169 case ACL_LINK:
2170 cnt = hdev->acl_cnt;
2171 break;
2172 case SCO_LINK:
2173 case ESCO_LINK:
2174 cnt = hdev->sco_cnt;
2175 break;
2176 case LE_LINK:
2177 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2178 break;
2179 default:
2180 cnt = 0;
2181 BT_ERR("Unknown link type");
2182 }
2183
2184 q = cnt / num;
2185 *quote = q ? q : 1;
2186 BT_DBG("chan %p quote %d", chan, *quote);
2187 return chan;
2188 }
2189
2190 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2191 {
2192 struct hci_conn_hash *h = &hdev->conn_hash;
2193 struct hci_conn *conn;
2194 int num = 0;
2195
2196 BT_DBG("%s", hdev->name);
2197
2198 rcu_read_lock();
2199
2200 list_for_each_entry_rcu(conn, &h->list, list) {
2201 struct hci_chan *chan;
2202
2203 if (conn->type != type)
2204 continue;
2205
2206 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2207 continue;
2208
2209 num++;
2210
2211 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2212 struct sk_buff *skb;
2213
2214 if (chan->sent) {
2215 chan->sent = 0;
2216 continue;
2217 }
2218
2219 if (skb_queue_empty(&chan->data_q))
2220 continue;
2221
2222 skb = skb_peek(&chan->data_q);
2223 if (skb->priority >= HCI_PRIO_MAX - 1)
2224 continue;
2225
2226 skb->priority = HCI_PRIO_MAX - 1;
2227
2228 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2229 skb->priority);
2230 }
2231
2232 if (hci_conn_num(hdev, type) == num)
2233 break;
2234 }
2235
2236 rcu_read_unlock();
2237
2238 }
2239
2240 static inline void hci_sched_acl(struct hci_dev *hdev)
2241 {
2242 struct hci_chan *chan;
2243 struct sk_buff *skb;
2244 int quote;
2245 unsigned int cnt;
2246
2247 BT_DBG("%s", hdev->name);
2248
2249 if (!hci_conn_num(hdev, ACL_LINK))
2250 return;
2251
2252 if (!test_bit(HCI_RAW, &hdev->flags)) {
2253 /* ACL tx timeout must be longer than maximum
2254 * link supervision timeout (40.9 seconds) */
2255 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2256 hci_link_tx_to(hdev, ACL_LINK);
2257 }
2258
2259 cnt = hdev->acl_cnt;
2260
2261 while (hdev->acl_cnt &&
2262 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2263 u32 priority = (skb_peek(&chan->data_q))->priority;
2264 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2265 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2266 skb->len, skb->priority);
2267
2268 /* Stop if priority has changed */
2269 if (skb->priority < priority)
2270 break;
2271
2272 skb = skb_dequeue(&chan->data_q);
2273
2274 hci_conn_enter_active_mode(chan->conn,
2275 bt_cb(skb)->force_active);
2276
2277 hci_send_frame(skb);
2278 hdev->acl_last_tx = jiffies;
2279
2280 hdev->acl_cnt--;
2281 chan->sent++;
2282 chan->conn->sent++;
2283 }
2284 }
2285
2286 if (cnt != hdev->acl_cnt)
2287 hci_prio_recalculate(hdev, ACL_LINK);
2288 }
2289
2290 /* Schedule SCO */
2291 static inline void hci_sched_sco(struct hci_dev *hdev)
2292 {
2293 struct hci_conn *conn;
2294 struct sk_buff *skb;
2295 int quote;
2296
2297 BT_DBG("%s", hdev->name);
2298
2299 if (!hci_conn_num(hdev, SCO_LINK))
2300 return;
2301
2302 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2303 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2304 BT_DBG("skb %p len %d", skb, skb->len);
2305 hci_send_frame(skb);
2306
2307 conn->sent++;
2308 if (conn->sent == ~0)
2309 conn->sent = 0;
2310 }
2311 }
2312 }
2313
2314 static inline void hci_sched_esco(struct hci_dev *hdev)
2315 {
2316 struct hci_conn *conn;
2317 struct sk_buff *skb;
2318 int quote;
2319
2320 BT_DBG("%s", hdev->name);
2321
2322 if (!hci_conn_num(hdev, ESCO_LINK))
2323 return;
2324
2325 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2326 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2327 BT_DBG("skb %p len %d", skb, skb->len);
2328 hci_send_frame(skb);
2329
2330 conn->sent++;
2331 if (conn->sent == ~0)
2332 conn->sent = 0;
2333 }
2334 }
2335 }
2336
2337 static inline void hci_sched_le(struct hci_dev *hdev)
2338 {
2339 struct hci_chan *chan;
2340 struct sk_buff *skb;
2341 int quote, cnt, tmp;
2342
2343 BT_DBG("%s", hdev->name);
2344
2345 if (!hci_conn_num(hdev, LE_LINK))
2346 return;
2347
2348 if (!test_bit(HCI_RAW, &hdev->flags)) {
2349 /* LE tx timeout must be longer than maximum
2350 * link supervision timeout (40.9 seconds) */
2351 if (!hdev->le_cnt && hdev->le_pkts &&
2352 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2353 hci_link_tx_to(hdev, LE_LINK);
2354 }
2355
2356 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2357 tmp = cnt;
2358 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2359 u32 priority = (skb_peek(&chan->data_q))->priority;
2360 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2361 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2362 skb->len, skb->priority);
2363
2364 /* Stop if priority has changed */
2365 if (skb->priority < priority)
2366 break;
2367
2368 skb = skb_dequeue(&chan->data_q);
2369
2370 hci_send_frame(skb);
2371 hdev->le_last_tx = jiffies;
2372
2373 cnt--;
2374 chan->sent++;
2375 chan->conn->sent++;
2376 }
2377 }
2378
2379 if (hdev->le_pkts)
2380 hdev->le_cnt = cnt;
2381 else
2382 hdev->acl_cnt = cnt;
2383
2384 if (cnt != tmp)
2385 hci_prio_recalculate(hdev, LE_LINK);
2386 }
2387
2388 static void hci_tx_work(struct work_struct *work)
2389 {
2390 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2391 struct sk_buff *skb;
2392
2393 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2394 hdev->sco_cnt, hdev->le_cnt);
2395
2396 /* Schedule queues and send stuff to HCI driver */
2397
2398 hci_sched_acl(hdev);
2399
2400 hci_sched_sco(hdev);
2401
2402 hci_sched_esco(hdev);
2403
2404 hci_sched_le(hdev);
2405
2406 /* Send next queued raw (unknown type) packet */
2407 while ((skb = skb_dequeue(&hdev->raw_q)))
2408 hci_send_frame(skb);
2409 }
2410
2411 /* ----- HCI RX task (incoming data processing) ----- */
2412
2413 /* ACL data packet */
2414 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2415 {
2416 struct hci_acl_hdr *hdr = (void *) skb->data;
2417 struct hci_conn *conn;
2418 __u16 handle, flags;
2419
2420 skb_pull(skb, HCI_ACL_HDR_SIZE);
2421
2422 handle = __le16_to_cpu(hdr->handle);
2423 flags = hci_flags(handle);
2424 handle = hci_handle(handle);
2425
2426 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2427
2428 hdev->stat.acl_rx++;
2429
2430 hci_dev_lock(hdev);
2431 conn = hci_conn_hash_lookup_handle(hdev, handle);
2432 hci_dev_unlock(hdev);
2433
2434 if (conn) {
2435 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2436
2437 /* Send to upper protocol */
2438 l2cap_recv_acldata(conn, skb, flags);
2439 return;
2440 } else {
2441 BT_ERR("%s ACL packet for unknown connection handle %d",
2442 hdev->name, handle);
2443 }
2444
2445 kfree_skb(skb);
2446 }
2447
2448 /* SCO data packet */
2449 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2450 {
2451 struct hci_sco_hdr *hdr = (void *) skb->data;
2452 struct hci_conn *conn;
2453 __u16 handle;
2454
2455 skb_pull(skb, HCI_SCO_HDR_SIZE);
2456
2457 handle = __le16_to_cpu(hdr->handle);
2458
2459 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2460
2461 hdev->stat.sco_rx++;
2462
2463 hci_dev_lock(hdev);
2464 conn = hci_conn_hash_lookup_handle(hdev, handle);
2465 hci_dev_unlock(hdev);
2466
2467 if (conn) {
2468 /* Send to upper protocol */
2469 sco_recv_scodata(conn, skb);
2470 return;
2471 } else {
2472 BT_ERR("%s SCO packet for unknown connection handle %d",
2473 hdev->name, handle);
2474 }
2475
2476 kfree_skb(skb);
2477 }
2478
2479 static void hci_rx_work(struct work_struct *work)
2480 {
2481 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2482 struct sk_buff *skb;
2483
2484 BT_DBG("%s", hdev->name);
2485
2486 while ((skb = skb_dequeue(&hdev->rx_q))) {
2487 if (atomic_read(&hdev->promisc)) {
2488 /* Send copy to the sockets */
2489 hci_send_to_sock(hdev, skb, NULL);
2490 }
2491
2492 if (test_bit(HCI_RAW, &hdev->flags)) {
2493 kfree_skb(skb);
2494 continue;
2495 }
2496
2497 if (test_bit(HCI_INIT, &hdev->flags)) {
2498 			/* Don't process data packets in this state. */
2499 switch (bt_cb(skb)->pkt_type) {
2500 case HCI_ACLDATA_PKT:
2501 case HCI_SCODATA_PKT:
2502 kfree_skb(skb);
2503 continue;
2504 }
2505 }
2506
2507 /* Process frame */
2508 switch (bt_cb(skb)->pkt_type) {
2509 case HCI_EVENT_PKT:
2510 BT_DBG("%s Event packet", hdev->name);
2511 hci_event_packet(hdev, skb);
2512 break;
2513
2514 case HCI_ACLDATA_PKT:
2515 BT_DBG("%s ACL data packet", hdev->name);
2516 hci_acldata_packet(hdev, skb);
2517 break;
2518
2519 case HCI_SCODATA_PKT:
2520 BT_DBG("%s SCO data packet", hdev->name);
2521 hci_scodata_packet(hdev, skb);
2522 break;
2523
2524 default:
2525 kfree_skb(skb);
2526 break;
2527 }
2528 }
2529 }
2530
2531 static void hci_cmd_work(struct work_struct *work)
2532 {
2533 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2534 struct sk_buff *skb;
2535
2536 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2537
2538 /* Send queued commands */
2539 if (atomic_read(&hdev->cmd_cnt)) {
2540 skb = skb_dequeue(&hdev->cmd_q);
2541 if (!skb)
2542 return;
2543
2544 kfree_skb(hdev->sent_cmd);
2545
2546 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2547 if (hdev->sent_cmd) {
2548 atomic_dec(&hdev->cmd_cnt);
2549 hci_send_frame(skb);
2550 if (test_bit(HCI_RESET, &hdev->flags))
2551 del_timer(&hdev->cmd_timer);
2552 else
2553 mod_timer(&hdev->cmd_timer,
2554 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2555 } else {
2556 skb_queue_head(&hdev->cmd_q, skb);
2557 queue_work(hdev->workqueue, &hdev->cmd_work);
2558 }
2559 }
2560 }
2561
2562 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2563 {
2564 /* General inquiry access code (GIAC) */
2565 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2566 struct hci_cp_inquiry cp;
2567
2568 BT_DBG("%s", hdev->name);
2569
2570 if (test_bit(HCI_INQUIRY, &hdev->flags))
2571 return -EINPROGRESS;
2572
2573 memset(&cp, 0, sizeof(cp));
2574 memcpy(&cp.lap, lap, sizeof(cp.lap));
2575 cp.length = length;
2576
2577 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2578 }
2579
2580 int hci_cancel_inquiry(struct hci_dev *hdev)
2581 {
2582 BT_DBG("%s", hdev->name);
2583
2584 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2585 return -EPERM;
2586
2587 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2588 }
2589
2590 module_param(enable_hs, bool, 0644);
2591 MODULE_PARM_DESC(enable_hs, "Enable High Speed");