Bluetooth: Add MGMT event for Passkey Entry
net/bluetooth/hci_core.c (android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO 2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82 return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
94
95 /* ---- HCI requests ---- */
96
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 {
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
101	/* If this is the init phase, check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105 return;
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112 }
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123 }
124
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
128 {
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170 {
171 int ret;
172
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182 }
183
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194 {
195 struct hci_cp_delete_stored_link_key cp;
196 struct sk_buff *skb;
197 __le16 param;
198 __u8 flt_type;
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207 skb->dev = (void *) hdev;
208
209 skb_queue_tail(&hdev->cmd_q, skb);
210 tasklet_schedule(&hdev->cmd_task);
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
220 }
221
222 /* Read Local Supported Features */
223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
224
225 /* Read Local Version */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
227
228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
230
231 #if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
240 }
241 #endif
242
243 /* Read BD Address */
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
251
252 /* Read Voice Setting */
253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
258 flt_type = HCI_FLT_CLEAR_ALL;
259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
260
261 /* Connection accept timeout ~20 secs */
262 param = cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
268 }
269
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271 {
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276 }
277
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
304 /* Encryption */
305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310 __le16 policy = cpu_to_le16(opt);
311
312 BT_DBG("%s %x", hdev->name, policy);
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317
318 /* Get HCI device by index.
319 * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
321 {
322 struct hci_dev *hdev = NULL, *d;
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
330 list_for_each_entry(d, &hci_dev_list, list) {
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338 }
339
340 /* ---- Inquiry support ---- */
341 static void inquiry_cache_flush(struct hci_dev *hdev)
342 {
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353 }
354
355 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356 {
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366 }
367
368 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369 {
370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_entry *ie;
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
377 /* Entry not in the cache. Add new one. */
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
380 return;
381
382 ie->next = cache->list;
383 cache->list = ie;
384 }
385
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
388 cache->timestamp = jiffies;
389 }
390
391 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392 {
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411 }
412
413 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414 {
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
428 }
429
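/* Handle an inquiry request from user space: start a new inquiry if the
 * cache is stale, empty, or a flush was requested, then copy the cached
 * results back to the caller. */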
430 int hci_inquiry(void __user *arg)
431 {
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
455 timeo = ir.length * msecs_to_jiffies(2000);
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
462
463	/* For an unlimited number of responses we will use a buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and
467	 * copy it to user space.
468 */
469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
470 if (!buf) {
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
486 } else
487 err = -EFAULT;
488
489 kfree(buf);
490
491 done:
492 hci_dev_put(hdev);
493 return err;
494 }
495
496 /* ---- HCI ioctl helpers ---- */
497
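/* Bring up an HCI device: open the driver transport and, unless the
 * device is marked raw, run the HCI init sequence. */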
498 int hci_dev_open(__u16 dev)
499 {
500 struct hci_dev *hdev;
501 int ret = 0;
502
503 hdev = hci_dev_get(dev);
504 if (!hdev)
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
524 /* Treat all non BR/EDR controllers as raw devices if
525 enable_hs is not set */
526 if (hdev->dev_type != HCI_BREDR && !enable_hs)
527 set_bit(HCI_RAW, &hdev->flags);
528
529 if (hdev->open(hdev)) {
530 ret = -EIO;
531 goto done;
532 }
533
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
537 hdev->init_last_cmd = 0;
538
539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
541
542 if (lmp_host_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
546 clear_bit(HCI_INIT, &hdev->flags);
547 }
548
549 if (!ret) {
550 hci_dev_hold(hdev);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
553 if (!test_bit(HCI_SETUP, &hdev->flags)) {
554 hci_dev_lock_bh(hdev);
555 mgmt_powered(hdev, 1);
556 hci_dev_unlock_bh(hdev);
557 }
558 } else {
559 /* Init failed, cleanup */
560 tasklet_kill(&hdev->rx_task);
561 tasklet_kill(&hdev->tx_task);
562 tasklet_kill(&hdev->cmd_task);
563
564 skb_queue_purge(&hdev->cmd_q);
565 skb_queue_purge(&hdev->rx_q);
566
567 if (hdev->flush)
568 hdev->flush(hdev);
569
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
573 }
574
575 hdev->close(hdev);
576 hdev->flags = 0;
577 }
578
579 done:
580 hci_req_unlock(hdev);
581 hci_dev_put(hdev);
582 return ret;
583 }
584
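/* Shut down an HCI device: flush all queues and connections, reset the
 * controller and call the driver's close(). */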
585 static int hci_dev_do_close(struct hci_dev *hdev)
586 {
587 BT_DBG("%s %p", hdev->name, hdev);
588
589 hci_req_cancel(hdev, ENODEV);
590 hci_req_lock(hdev);
591
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
593 del_timer_sync(&hdev->cmd_timer);
594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
602 if (hdev->discov_timeout > 0) {
603 cancel_delayed_work(&hdev->discov_off);
604 hdev->discov_timeout = 0;
605 }
606
607 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
608 cancel_delayed_work(&hdev->power_off);
609
610 hci_dev_lock_bh(hdev);
611 inquiry_cache_flush(hdev);
612 hci_conn_hash_flush(hdev);
613 hci_dev_unlock_bh(hdev);
614
615 hci_notify(hdev, HCI_DEV_DOWN);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 /* Reset device */
621 skb_queue_purge(&hdev->cmd_q);
622 atomic_set(&hdev->cmd_cnt, 1);
623 if (!test_bit(HCI_RAW, &hdev->flags)) {
624 set_bit(HCI_INIT, &hdev->flags);
625 __hci_request(hdev, hci_reset_req, 0,
626 msecs_to_jiffies(HCI_INIT_TIMEOUT));
627 clear_bit(HCI_INIT, &hdev->flags);
628 }
629
630 /* Kill cmd task */
631 tasklet_kill(&hdev->cmd_task);
632
633 /* Drop queues */
634 skb_queue_purge(&hdev->rx_q);
635 skb_queue_purge(&hdev->cmd_q);
636 skb_queue_purge(&hdev->raw_q);
637
638 /* Drop last sent command */
639 if (hdev->sent_cmd) {
640 del_timer_sync(&hdev->cmd_timer);
641 kfree_skb(hdev->sent_cmd);
642 hdev->sent_cmd = NULL;
643 }
644
645 /* After this point our queues are empty
646 * and no tasks are scheduled. */
647 hdev->close(hdev);
648
649 hci_dev_lock_bh(hdev);
650 mgmt_powered(hdev, 0);
651 hci_dev_unlock_bh(hdev);
652
653 /* Clear flags */
654 hdev->flags = 0;
655
656 hci_req_unlock(hdev);
657
658 hci_dev_put(hdev);
659 return 0;
660 }
661
662 int hci_dev_close(__u16 dev)
663 {
664 struct hci_dev *hdev;
665 int err;
666
667 hdev = hci_dev_get(dev);
668 if (!hdev)
669 return -ENODEV;
670 err = hci_dev_do_close(hdev);
671 hci_dev_put(hdev);
672 return err;
673 }
674
675 int hci_dev_reset(__u16 dev)
676 {
677 struct hci_dev *hdev;
678 int ret = 0;
679
680 hdev = hci_dev_get(dev);
681 if (!hdev)
682 return -ENODEV;
683
684 hci_req_lock(hdev);
685 tasklet_disable(&hdev->tx_task);
686
687 if (!test_bit(HCI_UP, &hdev->flags))
688 goto done;
689
690 /* Drop queues */
691 skb_queue_purge(&hdev->rx_q);
692 skb_queue_purge(&hdev->cmd_q);
693
694 hci_dev_lock_bh(hdev);
695 inquiry_cache_flush(hdev);
696 hci_conn_hash_flush(hdev);
697 hci_dev_unlock_bh(hdev);
698
699 if (hdev->flush)
700 hdev->flush(hdev);
701
702 atomic_set(&hdev->cmd_cnt, 1);
703 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
704
705 if (!test_bit(HCI_RAW, &hdev->flags))
706 ret = __hci_request(hdev, hci_reset_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
708
709 done:
710 tasklet_enable(&hdev->tx_task);
711 hci_req_unlock(hdev);
712 hci_dev_put(hdev);
713 return ret;
714 }
715
716 int hci_dev_reset_stat(__u16 dev)
717 {
718 struct hci_dev *hdev;
719 int ret = 0;
720
721 hdev = hci_dev_get(dev);
722 if (!hdev)
723 return -ENODEV;
724
725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
726
727 hci_dev_put(hdev);
728
729 return ret;
730 }
731
732 int hci_dev_cmd(unsigned int cmd, void __user *arg)
733 {
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
738 if (copy_from_user(&dr, arg, sizeof(dr)))
739 return -EFAULT;
740
741 hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
743 return -ENODEV;
744
745 switch (cmd) {
746 case HCISETAUTH:
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
749 break;
750
751 case HCISETENCRYPT:
752 if (!lmp_encrypt_capable(hdev)) {
753 err = -EOPNOTSUPP;
754 break;
755 }
756
757 if (!test_bit(HCI_AUTH, &hdev->flags)) {
758 /* Auth must be enabled first */
759 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
761 if (err)
762 break;
763 }
764
765 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
767 break;
768
769 case HCISETSCAN:
770 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
771 msecs_to_jiffies(HCI_INIT_TIMEOUT));
772 break;
773
774 case HCISETLINKPOL:
775 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
777 break;
778
779 case HCISETLINKMODE:
780 hdev->link_mode = ((__u16) dr.dev_opt) &
781 (HCI_LM_MASTER | HCI_LM_ACCEPT);
782 break;
783
784 case HCISETPTYPE:
785 hdev->pkt_type = (__u16) dr.dev_opt;
786 break;
787
788 case HCISETACLMTU:
789 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
790 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
791 break;
792
793 case HCISETSCOMTU:
794 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
795 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
796 break;
797
798 default:
799 err = -EINVAL;
800 break;
801 }
802
803 hci_dev_put(hdev);
804 return err;
805 }
806
807 int hci_get_dev_list(void __user *arg)
808 {
809 struct hci_dev *hdev;
810 struct hci_dev_list_req *dl;
811 struct hci_dev_req *dr;
812 int n = 0, size, err;
813 __u16 dev_num;
814
815 if (get_user(dev_num, (__u16 __user *) arg))
816 return -EFAULT;
817
818 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
819 return -EINVAL;
820
821 size = sizeof(*dl) + dev_num * sizeof(*dr);
822
823 dl = kzalloc(size, GFP_KERNEL);
824 if (!dl)
825 return -ENOMEM;
826
827 dr = dl->dev_req;
828
829 read_lock_bh(&hci_dev_list_lock);
830 list_for_each_entry(hdev, &hci_dev_list, list) {
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
832 cancel_delayed_work(&hdev->power_off);
833
834 if (!test_bit(HCI_MGMT, &hdev->flags))
835 set_bit(HCI_PAIRABLE, &hdev->flags);
836
837 (dr + n)->dev_id = hdev->id;
838 (dr + n)->dev_opt = hdev->flags;
839
840 if (++n >= dev_num)
841 break;
842 }
843 read_unlock_bh(&hci_dev_list_lock);
844
845 dl->dev_num = n;
846 size = sizeof(*dl) + n * sizeof(*dr);
847
848 err = copy_to_user(arg, dl, size);
849 kfree(dl);
850
851 return err ? -EFAULT : 0;
852 }
853
854 int hci_get_dev_info(void __user *arg)
855 {
856 struct hci_dev *hdev;
857 struct hci_dev_info di;
858 int err = 0;
859
860 if (copy_from_user(&di, arg, sizeof(di)))
861 return -EFAULT;
862
863 hdev = hci_dev_get(di.dev_id);
864 if (!hdev)
865 return -ENODEV;
866
867 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
868 cancel_delayed_work_sync(&hdev->power_off);
869
870 if (!test_bit(HCI_MGMT, &hdev->flags))
871 set_bit(HCI_PAIRABLE, &hdev->flags);
872
873 strcpy(di.name, hdev->name);
874 di.bdaddr = hdev->bdaddr;
875 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
876 di.flags = hdev->flags;
877 di.pkt_type = hdev->pkt_type;
878 di.acl_mtu = hdev->acl_mtu;
879 di.acl_pkts = hdev->acl_pkts;
880 di.sco_mtu = hdev->sco_mtu;
881 di.sco_pkts = hdev->sco_pkts;
882 di.link_policy = hdev->link_policy;
883 di.link_mode = hdev->link_mode;
884
885 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
886 memcpy(&di.features, &hdev->features, sizeof(di.features));
887
888 if (copy_to_user(arg, &di, sizeof(di)))
889 err = -EFAULT;
890
891 hci_dev_put(hdev);
892
893 return err;
894 }
895
896 /* ---- Interface to HCI drivers ---- */
897
898 static int hci_rfkill_set_block(void *data, bool blocked)
899 {
900 struct hci_dev *hdev = data;
901
902 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
903
904 if (!blocked)
905 return 0;
906
907 hci_dev_do_close(hdev);
908
909 return 0;
910 }
911
912 static const struct rfkill_ops hci_rfkill_ops = {
913 .set_block = hci_rfkill_set_block,
914 };
915
916 /* Alloc HCI device */
917 struct hci_dev *hci_alloc_dev(void)
918 {
919 struct hci_dev *hdev;
920
921 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
922 if (!hdev)
923 return NULL;
924
925 hci_init_sysfs(hdev);
926 skb_queue_head_init(&hdev->driver_init);
927
928 return hdev;
929 }
930 EXPORT_SYMBOL(hci_alloc_dev);
931
932 /* Free HCI device */
933 void hci_free_dev(struct hci_dev *hdev)
934 {
935 skb_queue_purge(&hdev->driver_init);
936
937 /* will free via device release */
938 put_device(&hdev->dev);
939 }
940 EXPORT_SYMBOL(hci_free_dev);
941
942 static void hci_power_on(struct work_struct *work)
943 {
944 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
945
946 BT_DBG("%s", hdev->name);
947
948 if (hci_dev_open(hdev->id) < 0)
949 return;
950
951 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
952 queue_delayed_work(hdev->workqueue, &hdev->power_off,
953 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
954
955 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
956 mgmt_index_added(hdev);
957 }
958
959 static void hci_power_off(struct work_struct *work)
960 {
961 struct hci_dev *hdev = container_of(work, struct hci_dev,
962 power_off.work);
963
964 BT_DBG("%s", hdev->name);
965
966 clear_bit(HCI_AUTO_OFF, &hdev->flags);
967
968 hci_dev_close(hdev->id);
969 }
970
971 static void hci_discov_off(struct work_struct *work)
972 {
973 struct hci_dev *hdev;
974 u8 scan = SCAN_PAGE;
975
976 hdev = container_of(work, struct hci_dev, discov_off.work);
977
978 BT_DBG("%s", hdev->name);
979
980 hci_dev_lock_bh(hdev);
981
982 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
983
984 hdev->discov_timeout = 0;
985
986 hci_dev_unlock_bh(hdev);
987 }
988
989 int hci_uuids_clear(struct hci_dev *hdev)
990 {
991 struct list_head *p, *n;
992
993 list_for_each_safe(p, n, &hdev->uuids) {
994 struct bt_uuid *uuid;
995
996 uuid = list_entry(p, struct bt_uuid, list);
997
998 list_del(p);
999 kfree(uuid);
1000 }
1001
1002 return 0;
1003 }
1004
1005 int hci_link_keys_clear(struct hci_dev *hdev)
1006 {
1007 struct list_head *p, *n;
1008
1009 list_for_each_safe(p, n, &hdev->link_keys) {
1010 struct link_key *key;
1011
1012 key = list_entry(p, struct link_key, list);
1013
1014 list_del(p);
1015 kfree(key);
1016 }
1017
1018 return 0;
1019 }
1020
1021 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1022 {
1023 struct link_key *k;
1024
1025 list_for_each_entry(k, &hdev->link_keys, list)
1026 if (bacmp(bdaddr, &k->bdaddr) == 0)
1027 return k;
1028
1029 return NULL;
1030 }
1031
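/* Decide whether a link key should be stored persistently, based on the
 * key type and the local/remote bonding requirements. */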
1032 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1033 u8 key_type, u8 old_key_type)
1034 {
1035 /* Legacy key */
1036 if (key_type < 0x03)
1037 return 1;
1038
1039 /* Debug keys are insecure so don't store them persistently */
1040 if (key_type == HCI_LK_DEBUG_COMBINATION)
1041 return 0;
1042
1043 /* Changed combination key and there's no previous one */
1044 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1045 return 0;
1046
1047 /* Security mode 3 case */
1048 if (!conn)
1049 return 1;
1050
1051 /* Neither local nor remote side had no-bonding as requirement */
1052 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1053 return 1;
1054
1055 /* Local side had dedicated bonding as requirement */
1056 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1057 return 1;
1058
1059 /* Remote side had dedicated bonding as requirement */
1060 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1061 return 1;
1062
1063 /* If none of the above criteria match, then don't store the key
1064 * persistently */
1065 return 0;
1066 }
1067
1068 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1069 {
1070 struct link_key *k;
1071
1072 list_for_each_entry(k, &hdev->link_keys, list) {
1073 struct key_master_id *id;
1074
1075 if (k->type != HCI_LK_SMP_LTK)
1076 continue;
1077
1078 if (k->dlen != sizeof(*id))
1079 continue;
1080
1081 id = (void *) &k->data;
1082 if (id->ediv == ediv &&
1083 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1084 return k;
1085 }
1086
1087 return NULL;
1088 }
1089 EXPORT_SYMBOL(hci_find_ltk);
1090
1091 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1092 bdaddr_t *bdaddr, u8 type)
1093 {
1094 struct link_key *k;
1095
1096 list_for_each_entry(k, &hdev->link_keys, list)
1097 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1098 return k;
1099
1100 return NULL;
1101 }
1102 EXPORT_SYMBOL(hci_find_link_key_type);
1103
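/* Add or update a stored link key for bdaddr. For new keys the management
 * interface is notified and non-persistent keys are dropped again. */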
1104 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1105 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1106 {
1107 struct link_key *key, *old_key;
1108 u8 old_key_type, persistent;
1109
1110 old_key = hci_find_link_key(hdev, bdaddr);
1111 if (old_key) {
1112 old_key_type = old_key->type;
1113 key = old_key;
1114 } else {
1115 old_key_type = conn ? conn->key_type : 0xff;
1116 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1117 if (!key)
1118 return -ENOMEM;
1119 list_add(&key->list, &hdev->link_keys);
1120 }
1121
1122 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1123
1124 /* Some buggy controller combinations generate a changed
1125 * combination key for legacy pairing even when there's no
1126 * previous key */
1127 if (type == HCI_LK_CHANGED_COMBINATION &&
1128 (!conn || conn->remote_auth == 0xff) &&
1129 old_key_type == 0xff) {
1130 type = HCI_LK_COMBINATION;
1131 if (conn)
1132 conn->key_type = type;
1133 }
1134
1135 bacpy(&key->bdaddr, bdaddr);
1136 memcpy(key->val, val, 16);
1137 key->pin_len = pin_len;
1138
1139 if (type == HCI_LK_CHANGED_COMBINATION)
1140 key->type = old_key_type;
1141 else
1142 key->type = type;
1143
1144 if (!new_key)
1145 return 0;
1146
1147 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1148
1149 mgmt_new_link_key(hdev, key, persistent);
1150
1151 if (!persistent) {
1152 list_del(&key->list);
1153 kfree(key);
1154 }
1155
1156 return 0;
1157 }
1158
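/* Store an SMP Long Term Key as a link key entry, with the EDIV/Rand
 * master identification kept in the key data. */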
1159 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1160 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1161 {
1162 struct link_key *key, *old_key;
1163 struct key_master_id *id;
1164 u8 old_key_type;
1165
1166 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1167
1168 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1169 if (old_key) {
1170 key = old_key;
1171 old_key_type = old_key->type;
1172 } else {
1173 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1174 if (!key)
1175 return -ENOMEM;
1176 list_add(&key->list, &hdev->link_keys);
1177 old_key_type = 0xff;
1178 }
1179
1180 key->dlen = sizeof(*id);
1181
1182 bacpy(&key->bdaddr, bdaddr);
1183 memcpy(key->val, ltk, sizeof(key->val));
1184 key->type = HCI_LK_SMP_LTK;
1185 key->pin_len = key_size;
1186
1187 id = (void *) &key->data;
1188 id->ediv = ediv;
1189 memcpy(id->rand, rand, sizeof(id->rand));
1190
1191 if (new_key)
1192 mgmt_new_link_key(hdev, key, old_key_type);
1193
1194 return 0;
1195 }
1196
1197 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1198 {
1199 struct link_key *key;
1200
1201 key = hci_find_link_key(hdev, bdaddr);
1202 if (!key)
1203 return -ENOENT;
1204
1205 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1206
1207 list_del(&key->list);
1208 kfree(key);
1209
1210 return 0;
1211 }
1212
1213 /* HCI command timer function */
1214 static void hci_cmd_timer(unsigned long arg)
1215 {
1216 struct hci_dev *hdev = (void *) arg;
1217
1218 BT_ERR("%s command tx timeout", hdev->name);
1219 atomic_set(&hdev->cmd_cnt, 1);
1220 tasklet_schedule(&hdev->cmd_task);
1221 }
1222
1223 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1224 bdaddr_t *bdaddr)
1225 {
1226 struct oob_data *data;
1227
1228 list_for_each_entry(data, &hdev->remote_oob_data, list)
1229 if (bacmp(bdaddr, &data->bdaddr) == 0)
1230 return data;
1231
1232 return NULL;
1233 }
1234
1235 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1236 {
1237 struct oob_data *data;
1238
1239 data = hci_find_remote_oob_data(hdev, bdaddr);
1240 if (!data)
1241 return -ENOENT;
1242
1243 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1244
1245 list_del(&data->list);
1246 kfree(data);
1247
1248 return 0;
1249 }
1250
1251 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1252 {
1253 struct oob_data *data, *n;
1254
1255 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1256 list_del(&data->list);
1257 kfree(data);
1258 }
1259
1260 return 0;
1261 }
1262
1263 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1264 u8 *randomizer)
1265 {
1266 struct oob_data *data;
1267
1268 data = hci_find_remote_oob_data(hdev, bdaddr);
1269
1270 if (!data) {
1271 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1272 if (!data)
1273 return -ENOMEM;
1274
1275 bacpy(&data->bdaddr, bdaddr);
1276 list_add(&data->list, &hdev->remote_oob_data);
1277 }
1278
1279 memcpy(data->hash, hash, sizeof(data->hash));
1280 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1281
1282 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1283
1284 return 0;
1285 }
1286
1287 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1288 bdaddr_t *bdaddr)
1289 {
1290 struct bdaddr_list *b;
1291
1292 list_for_each_entry(b, &hdev->blacklist, list)
1293 if (bacmp(bdaddr, &b->bdaddr) == 0)
1294 return b;
1295
1296 return NULL;
1297 }
1298
1299 int hci_blacklist_clear(struct hci_dev *hdev)
1300 {
1301 struct list_head *p, *n;
1302
1303 list_for_each_safe(p, n, &hdev->blacklist) {
1304 struct bdaddr_list *b;
1305
1306 b = list_entry(p, struct bdaddr_list, list);
1307
1308 list_del(p);
1309 kfree(b);
1310 }
1311
1312 return 0;
1313 }
1314
1315 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1316 {
1317 struct bdaddr_list *entry;
1318
1319 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1320 return -EBADF;
1321
1322 if (hci_blacklist_lookup(hdev, bdaddr))
1323 return -EEXIST;
1324
1325 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1326 if (!entry)
1327 return -ENOMEM;
1328
1329 bacpy(&entry->bdaddr, bdaddr);
1330
1331 list_add(&entry->list, &hdev->blacklist);
1332
1333 return mgmt_device_blocked(hdev, bdaddr);
1334 }
1335
1336 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1337 {
1338 struct bdaddr_list *entry;
1339
1340 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1341 return hci_blacklist_clear(hdev);
1342
1343 entry = hci_blacklist_lookup(hdev, bdaddr);
1344 if (!entry)
1345 return -ENOENT;
1346
1347 list_del(&entry->list);
1348 kfree(entry);
1349
1350 return mgmt_device_unblocked(hdev, bdaddr);
1351 }
1352
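/* Timer callback used to flush the LE advertising cache. */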
1353 static void hci_clear_adv_cache(unsigned long arg)
1354 {
1355 struct hci_dev *hdev = (void *) arg;
1356
1357 hci_dev_lock(hdev);
1358
1359 hci_adv_entries_clear(hdev);
1360
1361 hci_dev_unlock(hdev);
1362 }
1363
1364 int hci_adv_entries_clear(struct hci_dev *hdev)
1365 {
1366 struct adv_entry *entry, *tmp;
1367
1368 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1369 list_del(&entry->list);
1370 kfree(entry);
1371 }
1372
1373 BT_DBG("%s adv cache cleared", hdev->name);
1374
1375 return 0;
1376 }
1377
1378 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1379 {
1380 struct adv_entry *entry;
1381
1382 list_for_each_entry(entry, &hdev->adv_entries, list)
1383 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1384 return entry;
1385
1386 return NULL;
1387 }
1388
1389 static inline int is_connectable_adv(u8 evt_type)
1390 {
1391 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1392 return 1;
1393
1394 return 0;
1395 }
1396
1397 int hci_add_adv_entry(struct hci_dev *hdev,
1398 struct hci_ev_le_advertising_info *ev)
1399 {
1400 struct adv_entry *entry;
1401
1402 if (!is_connectable_adv(ev->evt_type))
1403 return -EINVAL;
1404
1405 /* Only new entries should be added to adv_entries. So, if
1406 * bdaddr was found, don't add it. */
1407 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1408 return 0;
1409
1410 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1411 if (!entry)
1412 return -ENOMEM;
1413
1414 bacpy(&entry->bdaddr, &ev->bdaddr);
1415 entry->bdaddr_type = ev->bdaddr_type;
1416
1417 list_add(&entry->list, &hdev->adv_entries);
1418
1419 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1420 batostr(&entry->bdaddr), entry->bdaddr_type);
1421
1422 return 0;
1423 }
1424
1425 /* Register HCI device */
1426 int hci_register_dev(struct hci_dev *hdev)
1427 {
1428 struct list_head *head = &hci_dev_list, *p;
1429 int i, id, error;
1430
1431 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1432 hdev->bus, hdev->owner);
1433
1434 if (!hdev->open || !hdev->close || !hdev->destruct)
1435 return -EINVAL;
1436
1437 /* Do not allow HCI_AMP devices to register at index 0,
1438 * so the index can be used as the AMP controller ID.
1439 */
1440 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1441
1442 write_lock_bh(&hci_dev_list_lock);
1443
1444 /* Find first available device id */
1445 list_for_each(p, &hci_dev_list) {
1446 if (list_entry(p, struct hci_dev, list)->id != id)
1447 break;
1448 head = p; id++;
1449 }
1450
1451 sprintf(hdev->name, "hci%d", id);
1452 hdev->id = id;
1453 list_add(&hdev->list, head);
1454
1455 atomic_set(&hdev->refcnt, 1);
1456 spin_lock_init(&hdev->lock);
1457
1458 hdev->flags = 0;
1459 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1460 hdev->esco_type = (ESCO_HV1);
1461 hdev->link_mode = (HCI_LM_ACCEPT);
1462 hdev->io_capability = 0x03; /* No Input No Output */
1463
1464 hdev->idle_timeout = 0;
1465 hdev->sniff_max_interval = 800;
1466 hdev->sniff_min_interval = 80;
1467
1468 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1469 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1470 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1471
1472 skb_queue_head_init(&hdev->rx_q);
1473 skb_queue_head_init(&hdev->cmd_q);
1474 skb_queue_head_init(&hdev->raw_q);
1475
1476 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1477
1478 for (i = 0; i < NUM_REASSEMBLY; i++)
1479 hdev->reassembly[i] = NULL;
1480
1481 init_waitqueue_head(&hdev->req_wait_q);
1482 mutex_init(&hdev->req_lock);
1483
1484 inquiry_cache_init(hdev);
1485
1486 hci_conn_hash_init(hdev);
1487
1488 INIT_LIST_HEAD(&hdev->mgmt_pending);
1489
1490 INIT_LIST_HEAD(&hdev->blacklist);
1491
1492 INIT_LIST_HEAD(&hdev->uuids);
1493
1494 INIT_LIST_HEAD(&hdev->link_keys);
1495
1496 INIT_LIST_HEAD(&hdev->remote_oob_data);
1497
1498 INIT_LIST_HEAD(&hdev->adv_entries);
1499 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1500 (unsigned long) hdev);
1501
1502 INIT_WORK(&hdev->power_on, hci_power_on);
1503 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1504
1505 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1506
1507 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1508
1509 atomic_set(&hdev->promisc, 0);
1510
1511 write_unlock_bh(&hci_dev_list_lock);
1512
1513 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1514 if (!hdev->workqueue) {
1515 error = -ENOMEM;
1516 goto err;
1517 }
1518
1519 error = hci_add_sysfs(hdev);
1520 if (error < 0)
1521 goto err_wqueue;
1522
1523 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1524 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1525 if (hdev->rfkill) {
1526 if (rfkill_register(hdev->rfkill) < 0) {
1527 rfkill_destroy(hdev->rfkill);
1528 hdev->rfkill = NULL;
1529 }
1530 }
1531
1532 set_bit(HCI_AUTO_OFF, &hdev->flags);
1533 set_bit(HCI_SETUP, &hdev->flags);
1534 queue_work(hdev->workqueue, &hdev->power_on);
1535
1536 hci_notify(hdev, HCI_DEV_REG);
1537
1538 return id;
1539
1540 err_wqueue:
1541 destroy_workqueue(hdev->workqueue);
1542 err:
1543 write_lock_bh(&hci_dev_list_lock);
1544 list_del(&hdev->list);
1545 write_unlock_bh(&hci_dev_list_lock);
1546
1547 return error;
1548 }
1549 EXPORT_SYMBOL(hci_register_dev);
1550
1551 /* Unregister HCI device */
1552 void hci_unregister_dev(struct hci_dev *hdev)
1553 {
1554 int i;
1555
1556 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1557
1558 write_lock_bh(&hci_dev_list_lock);
1559 list_del(&hdev->list);
1560 write_unlock_bh(&hci_dev_list_lock);
1561
1562 hci_dev_do_close(hdev);
1563
1564 for (i = 0; i < NUM_REASSEMBLY; i++)
1565 kfree_skb(hdev->reassembly[i]);
1566
1567 if (!test_bit(HCI_INIT, &hdev->flags) &&
1568 !test_bit(HCI_SETUP, &hdev->flags)) {
1569 hci_dev_lock_bh(hdev);
1570 mgmt_index_removed(hdev);
1571 hci_dev_unlock_bh(hdev);
1572 }
1573
1574 /* mgmt_index_removed should take care of emptying the
1575 * pending list */
1576 BUG_ON(!list_empty(&hdev->mgmt_pending));
1577
1578 hci_notify(hdev, HCI_DEV_UNREG);
1579
1580 if (hdev->rfkill) {
1581 rfkill_unregister(hdev->rfkill);
1582 rfkill_destroy(hdev->rfkill);
1583 }
1584
1585 hci_del_sysfs(hdev);
1586
1587 del_timer(&hdev->adv_timer);
1588
1589 destroy_workqueue(hdev->workqueue);
1590
1591 hci_dev_lock_bh(hdev);
1592 hci_blacklist_clear(hdev);
1593 hci_uuids_clear(hdev);
1594 hci_link_keys_clear(hdev);
1595 hci_remote_oob_data_clear(hdev);
1596 hci_adv_entries_clear(hdev);
1597 hci_dev_unlock_bh(hdev);
1598
1599 __hci_dev_put(hdev);
1600 }
1601 EXPORT_SYMBOL(hci_unregister_dev);
1602
1603 /* Suspend HCI device */
1604 int hci_suspend_dev(struct hci_dev *hdev)
1605 {
1606 hci_notify(hdev, HCI_DEV_SUSPEND);
1607 return 0;
1608 }
1609 EXPORT_SYMBOL(hci_suspend_dev);
1610
1611 /* Resume HCI device */
1612 int hci_resume_dev(struct hci_dev *hdev)
1613 {
1614 hci_notify(hdev, HCI_DEV_RESUME);
1615 return 0;
1616 }
1617 EXPORT_SYMBOL(hci_resume_dev);
1618
1619 /* Receive frame from HCI drivers */
1620 int hci_recv_frame(struct sk_buff *skb)
1621 {
1622 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1623 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1624 && !test_bit(HCI_INIT, &hdev->flags))) {
1625 kfree_skb(skb);
1626 return -ENXIO;
1627 }
1628
1629	/* Incoming skb */
1630 bt_cb(skb)->incoming = 1;
1631
1632 /* Time stamp */
1633 __net_timestamp(skb);
1634
1635 /* Queue frame for rx task */
1636 skb_queue_tail(&hdev->rx_q, skb);
1637 tasklet_schedule(&hdev->rx_task);
1638
1639 return 0;
1640 }
1641 EXPORT_SYMBOL(hci_recv_frame);
1642
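/* Reassemble a packet of the given type into hdev->reassembly[index].
 * Returns the number of input bytes left unconsumed, or a negative error. */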
1643 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1644 int count, __u8 index)
1645 {
1646 int len = 0;
1647 int hlen = 0;
1648 int remain = count;
1649 struct sk_buff *skb;
1650 struct bt_skb_cb *scb;
1651
1652 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1653 index >= NUM_REASSEMBLY)
1654 return -EILSEQ;
1655
1656 skb = hdev->reassembly[index];
1657
1658 if (!skb) {
1659 switch (type) {
1660 case HCI_ACLDATA_PKT:
1661 len = HCI_MAX_FRAME_SIZE;
1662 hlen = HCI_ACL_HDR_SIZE;
1663 break;
1664 case HCI_EVENT_PKT:
1665 len = HCI_MAX_EVENT_SIZE;
1666 hlen = HCI_EVENT_HDR_SIZE;
1667 break;
1668 case HCI_SCODATA_PKT:
1669 len = HCI_MAX_SCO_SIZE;
1670 hlen = HCI_SCO_HDR_SIZE;
1671 break;
1672 }
1673
1674 skb = bt_skb_alloc(len, GFP_ATOMIC);
1675 if (!skb)
1676 return -ENOMEM;
1677
1678 scb = (void *) skb->cb;
1679 scb->expect = hlen;
1680 scb->pkt_type = type;
1681
1682 skb->dev = (void *) hdev;
1683 hdev->reassembly[index] = skb;
1684 }
1685
1686 while (count) {
1687 scb = (void *) skb->cb;
1688 len = min(scb->expect, (__u16)count);
1689
1690 memcpy(skb_put(skb, len), data, len);
1691
1692 count -= len;
1693 data += len;
1694 scb->expect -= len;
1695 remain = count;
1696
1697 switch (type) {
1698 case HCI_EVENT_PKT:
1699 if (skb->len == HCI_EVENT_HDR_SIZE) {
1700 struct hci_event_hdr *h = hci_event_hdr(skb);
1701 scb->expect = h->plen;
1702
1703 if (skb_tailroom(skb) < scb->expect) {
1704 kfree_skb(skb);
1705 hdev->reassembly[index] = NULL;
1706 return -ENOMEM;
1707 }
1708 }
1709 break;
1710
1711 case HCI_ACLDATA_PKT:
1712 if (skb->len == HCI_ACL_HDR_SIZE) {
1713 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1714 scb->expect = __le16_to_cpu(h->dlen);
1715
1716 if (skb_tailroom(skb) < scb->expect) {
1717 kfree_skb(skb);
1718 hdev->reassembly[index] = NULL;
1719 return -ENOMEM;
1720 }
1721 }
1722 break;
1723
1724 case HCI_SCODATA_PKT:
1725 if (skb->len == HCI_SCO_HDR_SIZE) {
1726 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1727 scb->expect = h->dlen;
1728
1729 if (skb_tailroom(skb) < scb->expect) {
1730 kfree_skb(skb);
1731 hdev->reassembly[index] = NULL;
1732 return -ENOMEM;
1733 }
1734 }
1735 break;
1736 }
1737
1738 if (scb->expect == 0) {
1739 /* Complete frame */
1740
1741 bt_cb(skb)->pkt_type = type;
1742 hci_recv_frame(skb);
1743
1744 hdev->reassembly[index] = NULL;
1745 return remain;
1746 }
1747 }
1748
1749 return remain;
1750 }
1751
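/* Feed driver data into the per-type reassembly buffer until all of it
 * has been consumed. */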
1752 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1753 {
1754 int rem = 0;
1755
1756 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1757 return -EILSEQ;
1758
1759 while (count) {
1760 rem = hci_reassembly(hdev, type, data, count, type - 1);
1761 if (rem < 0)
1762 return rem;
1763
1764 data += (count - rem);
1765 count = rem;
1766 }
1767
1768 return rem;
1769 }
1770 EXPORT_SYMBOL(hci_recv_fragment);
1771
1772 #define STREAM_REASSEMBLY 0
1773
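/* Reassemble packets from a byte stream where each frame is preceded by
 * its packet type indicator (e.g. UART/H4 style transports). */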
1774 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1775 {
1776 int type;
1777 int rem = 0;
1778
1779 while (count) {
1780 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1781
1782 if (!skb) {
1783 struct { char type; } *pkt;
1784
1785 /* Start of the frame */
1786 pkt = data;
1787 type = pkt->type;
1788
1789 data++;
1790 count--;
1791 } else
1792 type = bt_cb(skb)->pkt_type;
1793
1794 rem = hci_reassembly(hdev, type, data, count,
1795 STREAM_REASSEMBLY);
1796 if (rem < 0)
1797 return rem;
1798
1799 data += (count - rem);
1800 count = rem;
1801 }
1802
1803 return rem;
1804 }
1805 EXPORT_SYMBOL(hci_recv_stream_fragment);
1806
1807 /* ---- Interface to upper protocols ---- */
1808
1809 /* Register/Unregister protocols.
1810 * hci_task_lock is used to ensure that no tasks are running. */
1811 int hci_register_proto(struct hci_proto *hp)
1812 {
1813 int err = 0;
1814
1815 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1816
1817 if (hp->id >= HCI_MAX_PROTO)
1818 return -EINVAL;
1819
1820 write_lock_bh(&hci_task_lock);
1821
1822 if (!hci_proto[hp->id])
1823 hci_proto[hp->id] = hp;
1824 else
1825 err = -EEXIST;
1826
1827 write_unlock_bh(&hci_task_lock);
1828
1829 return err;
1830 }
1831 EXPORT_SYMBOL(hci_register_proto);
1832
1833 int hci_unregister_proto(struct hci_proto *hp)
1834 {
1835 int err = 0;
1836
1837 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1838
1839 if (hp->id >= HCI_MAX_PROTO)
1840 return -EINVAL;
1841
1842 write_lock_bh(&hci_task_lock);
1843
1844 if (hci_proto[hp->id])
1845 hci_proto[hp->id] = NULL;
1846 else
1847 err = -ENOENT;
1848
1849 write_unlock_bh(&hci_task_lock);
1850
1851 return err;
1852 }
1853 EXPORT_SYMBOL(hci_unregister_proto);
1854
1855 int hci_register_cb(struct hci_cb *cb)
1856 {
1857 BT_DBG("%p name %s", cb, cb->name);
1858
1859 write_lock_bh(&hci_cb_list_lock);
1860 list_add(&cb->list, &hci_cb_list);
1861 write_unlock_bh(&hci_cb_list_lock);
1862
1863 return 0;
1864 }
1865 EXPORT_SYMBOL(hci_register_cb);
1866
1867 int hci_unregister_cb(struct hci_cb *cb)
1868 {
1869 BT_DBG("%p name %s", cb, cb->name);
1870
1871 write_lock_bh(&hci_cb_list_lock);
1872 list_del(&cb->list);
1873 write_unlock_bh(&hci_cb_list_lock);
1874
1875 return 0;
1876 }
1877 EXPORT_SYMBOL(hci_unregister_cb);
1878
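/* Hand one frame to the driver, copying it to raw sockets first if the
 * device is in promiscuous mode. */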
1879 static int hci_send_frame(struct sk_buff *skb)
1880 {
1881 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1882
1883 if (!hdev) {
1884 kfree_skb(skb);
1885 return -ENODEV;
1886 }
1887
1888 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1889
1890 if (atomic_read(&hdev->promisc)) {
1891 /* Time stamp */
1892 __net_timestamp(skb);
1893
1894 hci_send_to_sock(hdev, skb, NULL);
1895 }
1896
1897 /* Get rid of skb owner, prior to sending to the driver. */
1898 skb_orphan(skb);
1899
1900 return hdev->send(skb);
1901 }
1902
1903 /* Send HCI command */
1904 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1905 {
1906 int len = HCI_COMMAND_HDR_SIZE + plen;
1907 struct hci_command_hdr *hdr;
1908 struct sk_buff *skb;
1909
1910 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1911
1912 skb = bt_skb_alloc(len, GFP_ATOMIC);
1913 if (!skb) {
1914 BT_ERR("%s no memory for command", hdev->name);
1915 return -ENOMEM;
1916 }
1917
1918 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1919 hdr->opcode = cpu_to_le16(opcode);
1920 hdr->plen = plen;
1921
1922 if (plen)
1923 memcpy(skb_put(skb, plen), param, plen);
1924
1925 BT_DBG("skb len %d", skb->len);
1926
1927 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1928 skb->dev = (void *) hdev;
1929
1930 if (test_bit(HCI_INIT, &hdev->flags))
1931 hdev->init_last_cmd = opcode;
1932
1933 skb_queue_tail(&hdev->cmd_q, skb);
1934 tasklet_schedule(&hdev->cmd_task);
1935
1936 return 0;
1937 }
1938
1939 /* Get data from the previously sent command */
1940 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1941 {
1942 struct hci_command_hdr *hdr;
1943
1944 if (!hdev->sent_cmd)
1945 return NULL;
1946
1947 hdr = (void *) hdev->sent_cmd->data;
1948
1949 if (hdr->opcode != cpu_to_le16(opcode))
1950 return NULL;
1951
1952 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1953
1954 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1955 }
1956
1957 /* Send ACL data */
1958 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1959 {
1960 struct hci_acl_hdr *hdr;
1961 int len = skb->len;
1962
1963 skb_push(skb, HCI_ACL_HDR_SIZE);
1964 skb_reset_transport_header(skb);
1965 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1966 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1967 hdr->dlen = cpu_to_le16(len);
1968 }
1969
1970 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1971 struct sk_buff *skb, __u16 flags)
1972 {
1973 struct hci_dev *hdev = conn->hdev;
1974 struct sk_buff *list;
1975
1976 list = skb_shinfo(skb)->frag_list;
1977 if (!list) {
1978 /* Non fragmented */
1979 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1980
1981 skb_queue_tail(queue, skb);
1982 } else {
1983 /* Fragmented */
1984 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1985
1986 skb_shinfo(skb)->frag_list = NULL;
1987
1988 /* Queue all fragments atomically */
1989 spin_lock_bh(&queue->lock);
1990
1991 __skb_queue_tail(queue, skb);
1992
1993 flags &= ~ACL_START;
1994 flags |= ACL_CONT;
1995 do {
1996 skb = list; list = list->next;
1997
1998 skb->dev = (void *) hdev;
1999 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2000 hci_add_acl_hdr(skb, conn->handle, flags);
2001
2002 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2003
2004 __skb_queue_tail(queue, skb);
2005 } while (list);
2006
2007 spin_unlock_bh(&queue->lock);
2008 }
2009 }
2010
2011 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2012 {
2013 struct hci_conn *conn = chan->conn;
2014 struct hci_dev *hdev = conn->hdev;
2015
2016 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2017
2018 skb->dev = (void *) hdev;
2019 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2020 hci_add_acl_hdr(skb, conn->handle, flags);
2021
2022 hci_queue_acl(conn, &chan->data_q, skb, flags);
2023
2024 tasklet_schedule(&hdev->tx_task);
2025 }
2026 EXPORT_SYMBOL(hci_send_acl);
2027
2028 /* Send SCO data */
2029 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2030 {
2031 struct hci_dev *hdev = conn->hdev;
2032 struct hci_sco_hdr hdr;
2033
2034 BT_DBG("%s len %d", hdev->name, skb->len);
2035
2036 hdr.handle = cpu_to_le16(conn->handle);
2037 hdr.dlen = skb->len;
2038
2039 skb_push(skb, HCI_SCO_HDR_SIZE);
2040 skb_reset_transport_header(skb);
2041 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2042
2043 skb->dev = (void *) hdev;
2044 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2045
2046 skb_queue_tail(&conn->data_q, skb);
2047 tasklet_schedule(&hdev->tx_task);
2048 }
2049 EXPORT_SYMBOL(hci_send_sco);
2050
2051 /* ---- HCI TX task (outgoing data) ---- */
2052
2053 /* HCI Connection scheduler */
2054 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2055 {
2056 struct hci_conn_hash *h = &hdev->conn_hash;
2057 struct hci_conn *conn = NULL, *c;
2058 int num = 0, min = ~0;
2059
2060 /* We don't have to lock device here. Connections are always
2061 * added and removed with TX task disabled. */
2062 list_for_each_entry(c, &h->list, list) {
2063 if (c->type != type || skb_queue_empty(&c->data_q))
2064 continue;
2065
2066 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2067 continue;
2068
2069 num++;
2070
2071 if (c->sent < min) {
2072 min = c->sent;
2073 conn = c;
2074 }
2075
2076 if (hci_conn_num(hdev, type) == num)
2077 break;
2078 }
2079
2080 if (conn) {
2081 int cnt, q;
2082
2083 switch (conn->type) {
2084 case ACL_LINK:
2085 cnt = hdev->acl_cnt;
2086 break;
2087 case SCO_LINK:
2088 case ESCO_LINK:
2089 cnt = hdev->sco_cnt;
2090 break;
2091 case LE_LINK:
2092 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2093 break;
2094 default:
2095 cnt = 0;
2096 BT_ERR("Unknown link type");
2097 }
2098
2099 q = cnt / num;
2100 *quote = q ? q : 1;
2101 } else
2102 *quote = 0;
2103
2104 BT_DBG("conn %p quote %d", conn, *quote);
2105 return conn;
2106 }
2107
2108 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2109 {
2110 struct hci_conn_hash *h = &hdev->conn_hash;
2111 struct hci_conn *c;
2112
2113 BT_ERR("%s link tx timeout", hdev->name);
2114
2115 /* Kill stalled connections */
2116 list_for_each_entry(c, &h->list, list) {
2117 if (c->type == type && c->sent) {
2118 BT_ERR("%s killing stalled connection %s",
2119 hdev->name, batostr(&c->dst));
2120 hci_acl_disconn(c, 0x13);
2121 }
2122 }
2123 }
2124
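/* Pick the channel with the highest priority pending data and the fewest
 * packets outstanding on its connection, and compute its send quota. */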
2125 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2126 int *quote)
2127 {
2128 struct hci_conn_hash *h = &hdev->conn_hash;
2129 struct hci_chan *chan = NULL;
2130 int num = 0, min = ~0, cur_prio = 0;
2131 struct hci_conn *conn;
2132 int cnt, q, conn_num = 0;
2133
2134 BT_DBG("%s", hdev->name);
2135
2136 list_for_each_entry(conn, &h->list, list) {
2137 struct hci_chan_hash *ch;
2138 struct hci_chan *tmp;
2139
2140 if (conn->type != type)
2141 continue;
2142
2143 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2144 continue;
2145
2146 conn_num++;
2147
2148 ch = &conn->chan_hash;
2149
2150 list_for_each_entry(tmp, &ch->list, list) {
2151 struct sk_buff *skb;
2152
2153 if (skb_queue_empty(&tmp->data_q))
2154 continue;
2155
2156 skb = skb_peek(&tmp->data_q);
2157 if (skb->priority < cur_prio)
2158 continue;
2159
2160 if (skb->priority > cur_prio) {
2161 num = 0;
2162 min = ~0;
2163 cur_prio = skb->priority;
2164 }
2165
2166 num++;
2167
2168 if (conn->sent < min) {
2169 min = conn->sent;
2170 chan = tmp;
2171 }
2172 }
2173
2174 if (hci_conn_num(hdev, type) == conn_num)
2175 break;
2176 }
2177
2178 if (!chan)
2179 return NULL;
2180
2181 switch (chan->conn->type) {
2182 case ACL_LINK:
2183 cnt = hdev->acl_cnt;
2184 break;
2185 case SCO_LINK:
2186 case ESCO_LINK:
2187 cnt = hdev->sco_cnt;
2188 break;
2189 case LE_LINK:
2190 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2191 break;
2192 default:
2193 cnt = 0;
2194 BT_ERR("Unknown link type");
2195 }
2196
2197 q = cnt / num;
2198 *quote = q ? q : 1;
2199 BT_DBG("chan %p quote %d", chan, *quote);
2200 return chan;
2201 }
2202
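/* Promote the priority of queued packets on channels that sent nothing in
 * the last round, so lower priority traffic is not starved. */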
2203 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2204 {
2205 struct hci_conn_hash *h = &hdev->conn_hash;
2206 struct hci_conn *conn;
2207 int num = 0;
2208
2209 BT_DBG("%s", hdev->name);
2210
2211 list_for_each_entry(conn, &h->list, list) {
2212 struct hci_chan_hash *ch;
2213 struct hci_chan *chan;
2214
2215 if (conn->type != type)
2216 continue;
2217
2218 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2219 continue;
2220
2221 num++;
2222
2223 ch = &conn->chan_hash;
2224 list_for_each_entry(chan, &ch->list, list) {
2225 struct sk_buff *skb;
2226
2227 if (chan->sent) {
2228 chan->sent = 0;
2229 continue;
2230 }
2231
2232 if (skb_queue_empty(&chan->data_q))
2233 continue;
2234
2235 skb = skb_peek(&chan->data_q);
2236 if (skb->priority >= HCI_PRIO_MAX - 1)
2237 continue;
2238
2239 skb->priority = HCI_PRIO_MAX - 1;
2240
2241 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2242 skb->priority);
2243 }
2244
2245 if (hci_conn_num(hdev, type) == num)
2246 break;
2247 }
2248 }
2249
2250 static inline void hci_sched_acl(struct hci_dev *hdev)
2251 {
2252 struct hci_chan *chan;
2253 struct sk_buff *skb;
2254 int quote;
2255 unsigned int cnt;
2256
2257 BT_DBG("%s", hdev->name);
2258
2259 if (!hci_conn_num(hdev, ACL_LINK))
2260 return;
2261
2262 if (!test_bit(HCI_RAW, &hdev->flags)) {
2263 /* ACL tx timeout must be longer than maximum
2264 * link supervision timeout (40.9 seconds) */
2265 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2266 hci_link_tx_to(hdev, ACL_LINK);
2267 }
2268
2269 cnt = hdev->acl_cnt;
2270
2271 while (hdev->acl_cnt &&
2272 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2273 u32 priority = (skb_peek(&chan->data_q))->priority;
2274 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2275 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2276 skb->len, skb->priority);
2277
2278 /* Stop if priority has changed */
2279 if (skb->priority < priority)
2280 break;
2281
2282 skb = skb_dequeue(&chan->data_q);
2283
2284 hci_conn_enter_active_mode(chan->conn,
2285 bt_cb(skb)->force_active);
2286
2287 hci_send_frame(skb);
2288 hdev->acl_last_tx = jiffies;
2289
2290 hdev->acl_cnt--;
2291 chan->sent++;
2292 chan->conn->sent++;
2293 }
2294 }
2295
2296 if (cnt != hdev->acl_cnt)
2297 hci_prio_recalculate(hdev, ACL_LINK);
2298 }
2299
2300 /* Schedule SCO */
2301 static inline void hci_sched_sco(struct hci_dev *hdev)
2302 {
2303 struct hci_conn *conn;
2304 struct sk_buff *skb;
2305 int quote;
2306
2307 BT_DBG("%s", hdev->name);
2308
2309 if (!hci_conn_num(hdev, SCO_LINK))
2310 return;
2311
2312 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2313 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2314 BT_DBG("skb %p len %d", skb, skb->len);
2315 hci_send_frame(skb);
2316
2317 conn->sent++;
2318 if (conn->sent == ~0)
2319 conn->sent = 0;
2320 }
2321 }
2322 }
2323
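/* Schedule eSCO; works like SCO above, and both link types draw from
 * the same controller buffer count (sco_cnt). */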
2324 static inline void hci_sched_esco(struct hci_dev *hdev)
2325 {
2326 struct hci_conn *conn;
2327 struct sk_buff *skb;
2328 int quote;
2329
2330 BT_DBG("%s", hdev->name);
2331
2332 if (!hci_conn_num(hdev, ESCO_LINK))
2333 return;
2334
2335 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2336 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2337 BT_DBG("skb %p len %d", skb, skb->len);
2338 hci_send_frame(skb);
2339
2340 conn->sent++;
2341 if (conn->sent == ~0)
2342 conn->sent = 0;
2343 }
2344 }
2345 }
2346
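/* Schedule LE; uses the dedicated LE buffer pool (le_cnt) when the
 * controller reports one (le_pkts != 0), otherwise LE traffic shares
 * the ACL buffers. */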
2347 static inline void hci_sched_le(struct hci_dev *hdev)
2348 {
2349 struct hci_chan *chan;
2350 struct sk_buff *skb;
2351 int quote, cnt, tmp;
2352
2353 BT_DBG("%s", hdev->name);
2354
2355 if (!hci_conn_num(hdev, LE_LINK))
2356 return;
2357
2358 if (!test_bit(HCI_RAW, &hdev->flags)) {
2359 /* LE tx timeout must be longer than maximum
2360 * link supervision timeout (40.9 seconds) */
2361 if (!hdev->le_cnt && hdev->le_pkts &&
2362 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2363 hci_link_tx_to(hdev, LE_LINK);
2364 }
2365
2366 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2367 tmp = cnt;
2368 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2369 u32 priority = (skb_peek(&chan->data_q))->priority;
2370 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2371 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2372 skb->len, skb->priority);
2373
2374 /* Stop if priority has changed */
2375 if (skb->priority < priority)
2376 break;
2377
2378 skb = skb_dequeue(&chan->data_q);
2379
2380 hci_send_frame(skb);
2381 hdev->le_last_tx = jiffies;
2382
2383 cnt--;
2384 chan->sent++;
2385 chan->conn->sent++;
2386 }
2387 }
2388
2389 if (hdev->le_pkts)
2390 hdev->le_cnt = cnt;
2391 else
2392 hdev->acl_cnt = cnt;
2393
2394 if (cnt != tmp)
2395 hci_prio_recalculate(hdev, LE_LINK);
2396 }
2397
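/* TX tasklet: run the per-link-type schedulers and then flush any raw
 * (driver specific) frames. Runs under the hci_task_lock read lock. */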
2398 static void hci_tx_task(unsigned long arg)
2399 {
2400 struct hci_dev *hdev = (struct hci_dev *) arg;
2401 struct sk_buff *skb;
2402
2403 read_lock(&hci_task_lock);
2404
2405 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2406 hdev->sco_cnt, hdev->le_cnt);
2407
2408 	/* Schedule queues and send pending frames to the HCI driver */
2409
2410 hci_sched_acl(hdev);
2411
2412 hci_sched_sco(hdev);
2413
2414 hci_sched_esco(hdev);
2415
2416 hci_sched_le(hdev);
2417
2418 	/* Send any queued raw (unknown type) packets */
2419 while ((skb = skb_dequeue(&hdev->raw_q)))
2420 hci_send_frame(skb);
2421
2422 read_unlock(&hci_task_lock);
2423 }
2424
2425 /* ----- HCI RX task (incoming data processing) ----- */
2426
2427 /* ACL data packet */
2428 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2429 {
2430 struct hci_acl_hdr *hdr = (void *) skb->data;
2431 struct hci_conn *conn;
2432 __u16 handle, flags;
2433
2434 skb_pull(skb, HCI_ACL_HDR_SIZE);
2435
2436 handle = __le16_to_cpu(hdr->handle);
2437 flags = hci_flags(handle);
2438 handle = hci_handle(handle);
2439
2440 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2441
2442 hdev->stat.acl_rx++;
2443
2444 hci_dev_lock(hdev);
2445 conn = hci_conn_hash_lookup_handle(hdev, handle);
2446 hci_dev_unlock(hdev);
2447
2448 if (conn) {
2449 register struct hci_proto *hp;
2450
2451 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2452
2453 /* Send to upper protocol */
2454 hp = hci_proto[HCI_PROTO_L2CAP];
2455 if (hp && hp->recv_acldata) {
2456 hp->recv_acldata(conn, skb, flags);
2457 return;
2458 }
2459 } else {
2460 BT_ERR("%s ACL packet for unknown connection handle %d",
2461 hdev->name, handle);
2462 }
2463
2464 kfree_skb(skb);
2465 }
2466
2467 /* SCO data packet */
2468 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2469 {
2470 struct hci_sco_hdr *hdr = (void *) skb->data;
2471 struct hci_conn *conn;
2472 __u16 handle;
2473
2474 skb_pull(skb, HCI_SCO_HDR_SIZE);
2475
2476 handle = __le16_to_cpu(hdr->handle);
2477
2478 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2479
2480 hdev->stat.sco_rx++;
2481
2482 hci_dev_lock(hdev);
2483 conn = hci_conn_hash_lookup_handle(hdev, handle);
2484 hci_dev_unlock(hdev);
2485
2486 if (conn) {
2487 register struct hci_proto *hp;
2488
2489 /* Send to upper protocol */
2490 hp = hci_proto[HCI_PROTO_SCO];
2491 if (hp && hp->recv_scodata) {
2492 hp->recv_scodata(conn, skb);
2493 return;
2494 }
2495 } else {
2496 BT_ERR("%s SCO packet for unknown connection handle %d",
2497 hdev->name, handle);
2498 }
2499
2500 kfree_skb(skb);
2501 }
2502
2503 static void hci_rx_task(unsigned long arg)
2504 {
2505 struct hci_dev *hdev = (struct hci_dev *) arg;
2506 struct sk_buff *skb;
2507
2508 BT_DBG("%s", hdev->name);
2509
2510 read_lock(&hci_task_lock);
2511
2512 while ((skb = skb_dequeue(&hdev->rx_q))) {
2513 if (atomic_read(&hdev->promisc)) {
2514 /* Send copy to the sockets */
2515 hci_send_to_sock(hdev, skb, NULL);
2516 }
2517
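/* In raw mode the core does not process frames itself; the socket
 * copy made above (when in promiscuous mode) is all that is
 * delivered. */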
2518 if (test_bit(HCI_RAW, &hdev->flags)) {
2519 kfree_skb(skb);
2520 continue;
2521 }
2522
2523 if (test_bit(HCI_INIT, &hdev->flags)) {
2524 			/* Don't process data packets in these states. */
2525 switch (bt_cb(skb)->pkt_type) {
2526 case HCI_ACLDATA_PKT:
2527 case HCI_SCODATA_PKT:
2528 kfree_skb(skb);
2529 continue;
2530 }
2531 }
2532
2533 /* Process frame */
2534 switch (bt_cb(skb)->pkt_type) {
2535 case HCI_EVENT_PKT:
2536 hci_event_packet(hdev, skb);
2537 break;
2538
2539 case HCI_ACLDATA_PKT:
2540 BT_DBG("%s ACL data packet", hdev->name);
2541 hci_acldata_packet(hdev, skb);
2542 break;
2543
2544 case HCI_SCODATA_PKT:
2545 BT_DBG("%s SCO data packet", hdev->name);
2546 hci_scodata_packet(hdev, skb);
2547 break;
2548
2549 default:
2550 kfree_skb(skb);
2551 break;
2552 }
2553 }
2554
2555 read_unlock(&hci_task_lock);
2556 }
2557
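/* Command tasklet: send one queued HCI command whenever the controller
 * has advertised a free command slot (cmd_cnt). A clone of the command
 * is kept in sent_cmd so the completion handlers can inspect its
 * parameters, and cmd_timer is armed to catch an unresponsive
 * controller (except while a reset is in flight). */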
2558 static void hci_cmd_task(unsigned long arg)
2559 {
2560 struct hci_dev *hdev = (struct hci_dev *) arg;
2561 struct sk_buff *skb;
2562
2563 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2564
2565 /* Send queued commands */
2566 if (atomic_read(&hdev->cmd_cnt)) {
2567 skb = skb_dequeue(&hdev->cmd_q);
2568 if (!skb)
2569 return;
2570
2571 kfree_skb(hdev->sent_cmd);
2572
2573 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2574 if (hdev->sent_cmd) {
2575 atomic_dec(&hdev->cmd_cnt);
2576 hci_send_frame(skb);
2577 if (test_bit(HCI_RESET, &hdev->flags))
2578 del_timer(&hdev->cmd_timer);
2579 else
2580 mod_timer(&hdev->cmd_timer,
2581 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2582 } else {
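/* Cloning failed: put the command back on the queue and retry later. */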
2583 skb_queue_head(&hdev->cmd_q, skb);
2584 tasklet_schedule(&hdev->cmd_task);
2585 }
2586 }
2587 }
2588
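/* Start a general inquiry (GIAC) lasting 'length' * 1.28 seconds.
 * Returns -EINPROGRESS if an inquiry is already running. */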
2589 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2590 {
2591 /* General inquiry access code (GIAC) */
2592 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2593 struct hci_cp_inquiry cp;
2594
2595 BT_DBG("%s", hdev->name);
2596
2597 if (test_bit(HCI_INQUIRY, &hdev->flags))
2598 return -EINPROGRESS;
2599
2600 memset(&cp, 0, sizeof(cp));
2601 memcpy(&cp.lap, lap, sizeof(cp.lap));
2602 cp.length = length;
2603
2604 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2605 }
2606
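/* Abort a running inquiry; returns -EPERM if none is in progress. */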
2607 int hci_cancel_inquiry(struct hci_dev *hdev)
2608 {
2609 BT_DBG("%s", hdev->name);
2610
2611 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2612 return -EPERM;
2613
2614 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2615 }