Bluetooth: Add missing hci_dev locking when calling mgmt functions
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
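
/*
 * Note (sketch, inferred from the code above and hci_send_cmd() below):
 * while HCI_INIT is set, hci_send_cmd() records each opcode in
 * hdev->init_last_cmd, so only the final command of an init sequence,
 * e.g. HCI_OP_DELETE_STORED_LINK_KEY at the end of hci_init_req(),
 * actually completes the pending request and wakes __hci_request().
 */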

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
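
/*
 * Usage sketch (mirrors the HCISETSCAN handling in hci_dev_cmd() below):
 * a small request callback is paired with hci_request(), which
 * serializes on the request lock and sleeps in __hci_request() until
 * hci_req_complete() fires from the event path:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */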

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
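
/*
 * Hold/put sketch (this is the pattern hci_inquiry() below follows):
 * every successful hci_dev_get() must be balanced by hci_dev_put():
 *
 *	hdev = hci_dev_get(ir.dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */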

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255
	 * entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
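
/*
 * Locking note (this is the point of the "Add missing hci_dev locking
 * when calling mgmt functions" change): mgmt helpers such as
 * mgmt_powered() above expect the hdev lock to be held by the caller,
 * hence the hci_dev_lock_bh()/hci_dev_unlock_bh() pair around each
 * call; hci_dev_do_close() and hci_unregister_dev() below follow the
 * same pattern.
 */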

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work_sync(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work_sync(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
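
/*
 * Background (sketch; the 0x01/0x02/0x03 checks above appear to follow
 * the Core spec encoding of the authentication requirements): 0x00/0x01
 * mean "no bonding", 0x02/0x03 mean "dedicated bonding" and 0x04/0x05
 * mean "general bonding", with the odd values additionally requiring
 * MITM protection. So "auth_type > 0x01" reads as "some form of bonding
 * was requested".
 */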

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}

static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
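
/*
 * Slot-mapping sketch: with HCI_ACLDATA_PKT == 0x02, HCI_SCODATA_PKT
 * == 0x03 and HCI_EVENT_PKT == 0x04, "type - 1" above gives each
 * packet type its own reassembly slot (1..3), leaving slot 0 free for
 * STREAM_REASSEMBLY used by hci_recv_stream_fragment() below.
 */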

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
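
/*
 * Example (sketch; this exact call appears in hci_init_req() above):
 * parameters travel as a packed command-parameter struct, and the
 * command is queued for the cmd task rather than sent synchronously:
 *
 *	struct hci_cp_delete_stored_link_key cp;
 *
 *	bacpy(&cp.bdaddr, BDADDR_ANY);
 *	cp.delete_all = 1;
 *	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
 */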

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
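
/*
 * Fragment-flag sketch: only the head skb keeps ACL_START in its
 * handle flags; every entry on the frag_list is re-tagged ACL_CONT
 * before queueing, so a three-fragment buffer goes out as
 * START, CONT, CONT, all queued atomically under the queue lock.
 */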

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
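
/*
 * Quota sketch: the least-used connection wins, and its quota is the
 * free controller buffer count divided by the number of busy
 * connections; e.g. with hdev->acl_cnt == 8 and num == 3 busy ACL
 * links, the winner may send q = 8 / 3 = 2 packets this round (never
 * less than 1).
 */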
2108
2109 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2110 {
2111 struct hci_conn_hash *h = &hdev->conn_hash;
2112 struct hci_conn *c;
2113
2114 BT_ERR("%s link tx timeout", hdev->name);
2115
2116 /* Kill stalled connections */
2117 list_for_each_entry(c, &h->list, list) {
2118 if (c->type == type && c->sent) {
2119 BT_ERR("%s killing stalled connection %s",
2120 hdev->name, batostr(&c->dst));
2121 hci_acl_disconn(c, 0x13);
2122 }
2123 }
2124 }
2125
2126 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2127 int *quote)
2128 {
2129 struct hci_conn_hash *h = &hdev->conn_hash;
2130 struct hci_chan *chan = NULL;
2131 int num = 0, min = ~0, cur_prio = 0;
2132 struct hci_conn *conn;
2133 int cnt, q, conn_num = 0;
2134
2135 BT_DBG("%s", hdev->name);
2136
2137 list_for_each_entry(conn, &h->list, list) {
2138 struct hci_chan_hash *ch;
2139 struct hci_chan *tmp;
2140
2141 if (conn->type != type)
2142 continue;
2143
2144 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2145 continue;
2146
2147 conn_num++;
2148
2149 ch = &conn->chan_hash;
2150
2151 list_for_each_entry(tmp, &ch->list, list) {
2152 struct sk_buff *skb;
2153
2154 if (skb_queue_empty(&tmp->data_q))
2155 continue;
2156
2157 skb = skb_peek(&tmp->data_q);
2158 if (skb->priority < cur_prio)
2159 continue;
2160
2161 if (skb->priority > cur_prio) {
2162 num = 0;
2163 min = ~0;
2164 cur_prio = skb->priority;
2165 }
2166
2167 num++;
2168
2169 if (conn->sent < min) {
2170 min = conn->sent;
2171 chan = tmp;
2172 }
2173 }
2174
2175 if (hci_conn_num(hdev, type) == conn_num)
2176 break;
2177 }
2178
2179 if (!chan)
2180 return NULL;
2181
2182 switch (chan->conn->type) {
2183 case ACL_LINK:
2184 cnt = hdev->acl_cnt;
2185 break;
2186 case SCO_LINK:
2187 case ESCO_LINK:
2188 cnt = hdev->sco_cnt;
2189 break;
2190 case LE_LINK:
2191 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2192 break;
2193 default:
2194 cnt = 0;
2195 BT_ERR("Unknown link type");
2196 }
2197
2198 q = cnt / num;
2199 *quote = q ? q : 1;
2200 BT_DBG("chan %p quote %d", chan, *quote);
2201 return chan;
2202 }
2203
2204 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2205 {
2206 struct hci_conn_hash *h = &hdev->conn_hash;
2207 struct hci_conn *conn;
2208 int num = 0;
2209
2210 BT_DBG("%s", hdev->name);
2211
2212 list_for_each_entry(conn, &h->list, list) {
2213 struct hci_chan_hash *ch;
2214 struct hci_chan *chan;
2215
2216 if (conn->type != type)
2217 continue;
2218
2219 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2220 continue;
2221
2222 num++;
2223
2224 ch = &conn->chan_hash;
2225 list_for_each_entry(chan, &ch->list, list) {
2226 struct sk_buff *skb;
2227
2228 if (chan->sent) {
2229 chan->sent = 0;
2230 continue;
2231 }
2232
2233 if (skb_queue_empty(&chan->data_q))
2234 continue;
2235
2236 skb = skb_peek(&chan->data_q);
2237 if (skb->priority >= HCI_PRIO_MAX - 1)
2238 continue;
2239
2240 skb->priority = HCI_PRIO_MAX - 1;
2241
2242 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2243 skb->priority);
2244 }
2245
2246 if (hci_conn_num(hdev, type) == num)
2247 break;
2248 }
2249 }
2250
2251 static inline void hci_sched_acl(struct hci_dev *hdev)
2252 {
2253 struct hci_chan *chan;
2254 struct sk_buff *skb;
2255 int quote;
2256 unsigned int cnt;
2257
2258 BT_DBG("%s", hdev->name);
2259
2260 if (!hci_conn_num(hdev, ACL_LINK))
2261 return;
2262
2263 if (!test_bit(HCI_RAW, &hdev->flags)) {
2264 /* ACL tx timeout must be longer than maximum
2265 * link supervision timeout (40.9 seconds) */
2266 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2267 hci_link_tx_to(hdev, ACL_LINK);
2268 }
2269
2270 cnt = hdev->acl_cnt;
2271
2272 while (hdev->acl_cnt &&
2273 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2274 u32 priority = (skb_peek(&chan->data_q))->priority;
2275 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2276 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2277 skb->len, skb->priority);
2278
2279 /* Stop if priority has changed */
2280 if (skb->priority < priority)
2281 break;
2282
2283 skb = skb_dequeue(&chan->data_q);
2284
2285 hci_conn_enter_active_mode(chan->conn,
2286 bt_cb(skb)->force_active);
2287
2288 hci_send_frame(skb);
2289 hdev->acl_last_tx = jiffies;
2290
2291 hdev->acl_cnt--;
2292 chan->sent++;
2293 chan->conn->sent++;
2294 }
2295 }
2296
2297 if (cnt != hdev->acl_cnt)
2298 hci_prio_recalculate(hdev, ACL_LINK);
2299 }
2300
2301 /* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

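/* Schedule eSCO: identical to the SCO scheduler above, but walking
 * ESCO_LINK connections; both share the controller's SCO buffer count
 * (hdev->sco_cnt). */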
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

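/* LE scheduler: controllers without a dedicated LE buffer pool
 * (hdev->le_pkts == 0) borrow ACL buffers, so the consumed count is
 * written back to le_cnt or acl_cnt accordingly. */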
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

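/* TX tasklet: run the per-link-type schedulers under hci_task_lock, then
 * flush any raw (unknown type) packets straight to the driver. */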
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
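/* The 16-bit handle field packs a 12-bit connection handle together with
 * the packet boundary and broadcast flags; hci_handle() and hci_flags()
 * extract the two parts below. */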
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
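/* Same shape as ACL RX: look up the connection for the handle and hand the
 * frame to the registered SCO protocol, or drop it if the handle is
 * unknown. */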
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

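/* RX tasklet: drain rx_q, mirroring traffic to promiscuous sockets first,
 * dropping everything in raw mode, discarding data packets while HCI_INIT
 * is set (events are still processed), and dispatching the rest by packet
 * type. */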
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

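/* CMD tasklet: while the controller has command credits (cmd_cnt), send the
 * next queued command, keep a clone in hdev->sent_cmd for command-complete
 * handling, and (re)arm the command timeout unless a reset is in flight.
 * If the clone fails, the command is requeued and the tasklet rescheduled. */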
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}

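/* Start an inquiry with the General Inquiry Access Code unless one is
 * already in progress. `length` is the inquiry duration in units of 1.28 s.
 * A hypothetical caller sketch (mgmt is the real user):
 *
 *	int err = hci_do_inquiry(hdev, 0x08);	// ~10.24 s
 *	if (err < 0 && err != -EINPROGRESS)
 *		BT_ERR("inquiry failed: %d", err);
 */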
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}