Bluetooth: Do not set HCI_RAW when HS enabled
net/bluetooth/hci_core.c (from mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO 2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82 return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
94
95 /* ---- HCI requests ---- */
96
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 {
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
101 	/* If this is the init phase, check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105 return;
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112 }
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123 }
124
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
128 {
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170 {
171 int ret;
172
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182 }
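/*
 * Editorial note (not in the original source): a request callback such as
 * hci_scan_req() only queues HCI commands; __hci_request() then sleeps on
 * req_wait_q until hci_req_complete() or hci_req_cancel() wakes it, or the
 * timeout expires.  A typical caller, as in the HCISETSCAN ioctl further
 * below, looks roughly like:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * (SCAN_PAGE | SCAN_INQUIRY is only an illustrative opt value; the ioctl
 * passes whatever userspace supplied in dr.dev_opt.)
 */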
183
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194 {
195 struct hci_cp_delete_stored_link_key cp;
196 struct sk_buff *skb;
197 __le16 param;
198 __u8 flt_type;
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207 skb->dev = (void *) hdev;
208
209 skb_queue_tail(&hdev->cmd_q, skb);
210 tasklet_schedule(&hdev->cmd_task);
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
220 }
221
222 /* Read Local Supported Features */
223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
224
225 /* Read Local Version */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
227
228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
230
231 #if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
240 }
241 #endif
242
243 /* Read BD Address */
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
251
252 /* Read Voice Setting */
253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
258 flt_type = HCI_FLT_CLEAR_ALL;
259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
260
261 /* Connection accept timeout ~20 secs */
262 param = cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
268 }
269
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271 {
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276 }
277
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
304 /* Encryption */
305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310 __le16 policy = cpu_to_le16(opt);
311
312 BT_DBG("%s %x", hdev->name, policy);
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317
318 /* Get HCI device by index.
319 * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
321 {
322 struct hci_dev *hdev = NULL, *d;
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
330 list_for_each_entry(d, &hci_dev_list, list) {
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338 }
339
340 /* ---- Inquiry support ---- */
341 static void inquiry_cache_flush(struct hci_dev *hdev)
342 {
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353 }
354
355 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356 {
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366 }
367
368 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369 {
370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_entry *ie;
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
377 /* Entry not in the cache. Add new one. */
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
380 return;
381
382 ie->next = cache->list;
383 cache->list = ie;
384 }
385
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
388 cache->timestamp = jiffies;
389 }
390
391 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392 {
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411 }
412
413 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414 {
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
428 }
429
430 int hci_inquiry(void __user *arg)
431 {
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
455 timeo = ir.length * msecs_to_jiffies(2000);
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
462
463 	/* for an unlimited number of responses we will use a buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
467 	 * copy it to user space.
468 */
469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
470 if (!buf) {
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
486 } else
487 err = -EFAULT;
488
489 kfree(buf);
490
491 done:
492 hci_dev_put(hdev);
493 return err;
494 }
495
496 /* ---- HCI ioctl helpers ---- */
497
498 int hci_dev_open(__u16 dev)
499 {
500 struct hci_dev *hdev;
501 int ret = 0;
502
503 hdev = hci_dev_get(dev);
504 if (!hdev)
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
524 /* Treat all non BR/EDR controllers as raw devices if
525 enable_hs is not set */
526 if (hdev->dev_type != HCI_BREDR && !enable_hs)
527 set_bit(HCI_RAW, &hdev->flags);
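	/*
	 * Editorial note: this is the change named in the commit title above:
	 * non-BR/EDR (AMP) controllers are only marked HCI_RAW when High Speed
	 * support is disabled; with enable_hs set they go through the normal
	 * HCI init sequence below instead of being treated as raw devices.
	 */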
528
529 if (hdev->open(hdev)) {
530 ret = -EIO;
531 goto done;
532 }
533
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
537 hdev->init_last_cmd = 0;
538
539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
541
542 if (lmp_host_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
546 clear_bit(HCI_INIT, &hdev->flags);
547 }
548
549 if (!ret) {
550 hci_dev_hold(hdev);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
553 if (!test_bit(HCI_SETUP, &hdev->flags)) {
554 hci_dev_lock_bh(hdev);
555 mgmt_powered(hdev, 1);
556 hci_dev_unlock_bh(hdev);
557 }
558 } else {
559 /* Init failed, cleanup */
560 tasklet_kill(&hdev->rx_task);
561 tasklet_kill(&hdev->tx_task);
562 tasklet_kill(&hdev->cmd_task);
563
564 skb_queue_purge(&hdev->cmd_q);
565 skb_queue_purge(&hdev->rx_q);
566
567 if (hdev->flush)
568 hdev->flush(hdev);
569
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
573 }
574
575 hdev->close(hdev);
576 hdev->flags = 0;
577 }
578
579 done:
580 hci_req_unlock(hdev);
581 hci_dev_put(hdev);
582 return ret;
583 }
584
585 static int hci_dev_do_close(struct hci_dev *hdev)
586 {
587 BT_DBG("%s %p", hdev->name, hdev);
588
589 hci_req_cancel(hdev, ENODEV);
590 hci_req_lock(hdev);
591
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
593 del_timer_sync(&hdev->cmd_timer);
594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
602 if (hdev->discov_timeout > 0) {
603 cancel_delayed_work(&hdev->discov_off);
604 hdev->discov_timeout = 0;
605 }
606
607 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
608 cancel_delayed_work(&hdev->power_off);
609
610 hci_dev_lock_bh(hdev);
611 inquiry_cache_flush(hdev);
612 hci_conn_hash_flush(hdev);
613 hci_dev_unlock_bh(hdev);
614
615 hci_notify(hdev, HCI_DEV_DOWN);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 /* Reset device */
621 skb_queue_purge(&hdev->cmd_q);
622 atomic_set(&hdev->cmd_cnt, 1);
623 if (!test_bit(HCI_RAW, &hdev->flags)) {
624 set_bit(HCI_INIT, &hdev->flags);
625 __hci_request(hdev, hci_reset_req, 0,
626 msecs_to_jiffies(HCI_INIT_TIMEOUT));
627 clear_bit(HCI_INIT, &hdev->flags);
628 }
629
630 /* Kill cmd task */
631 tasklet_kill(&hdev->cmd_task);
632
633 /* Drop queues */
634 skb_queue_purge(&hdev->rx_q);
635 skb_queue_purge(&hdev->cmd_q);
636 skb_queue_purge(&hdev->raw_q);
637
638 /* Drop last sent command */
639 if (hdev->sent_cmd) {
640 del_timer_sync(&hdev->cmd_timer);
641 kfree_skb(hdev->sent_cmd);
642 hdev->sent_cmd = NULL;
643 }
644
645 /* After this point our queues are empty
646 * and no tasks are scheduled. */
647 hdev->close(hdev);
648
649 hci_dev_lock_bh(hdev);
650 mgmt_powered(hdev, 0);
651 hci_dev_unlock_bh(hdev);
652
653 /* Clear flags */
654 hdev->flags = 0;
655
656 hci_req_unlock(hdev);
657
658 hci_dev_put(hdev);
659 return 0;
660 }
661
662 int hci_dev_close(__u16 dev)
663 {
664 struct hci_dev *hdev;
665 int err;
666
667 hdev = hci_dev_get(dev);
668 if (!hdev)
669 return -ENODEV;
670 err = hci_dev_do_close(hdev);
671 hci_dev_put(hdev);
672 return err;
673 }
674
675 int hci_dev_reset(__u16 dev)
676 {
677 struct hci_dev *hdev;
678 int ret = 0;
679
680 hdev = hci_dev_get(dev);
681 if (!hdev)
682 return -ENODEV;
683
684 hci_req_lock(hdev);
685 tasklet_disable(&hdev->tx_task);
686
687 if (!test_bit(HCI_UP, &hdev->flags))
688 goto done;
689
690 /* Drop queues */
691 skb_queue_purge(&hdev->rx_q);
692 skb_queue_purge(&hdev->cmd_q);
693
694 hci_dev_lock_bh(hdev);
695 inquiry_cache_flush(hdev);
696 hci_conn_hash_flush(hdev);
697 hci_dev_unlock_bh(hdev);
698
699 if (hdev->flush)
700 hdev->flush(hdev);
701
702 atomic_set(&hdev->cmd_cnt, 1);
703 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
704
705 if (!test_bit(HCI_RAW, &hdev->flags))
706 ret = __hci_request(hdev, hci_reset_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
708
709 done:
710 tasklet_enable(&hdev->tx_task);
711 hci_req_unlock(hdev);
712 hci_dev_put(hdev);
713 return ret;
714 }
715
716 int hci_dev_reset_stat(__u16 dev)
717 {
718 struct hci_dev *hdev;
719 int ret = 0;
720
721 hdev = hci_dev_get(dev);
722 if (!hdev)
723 return -ENODEV;
724
725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
726
727 hci_dev_put(hdev);
728
729 return ret;
730 }
731
732 int hci_dev_cmd(unsigned int cmd, void __user *arg)
733 {
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
738 if (copy_from_user(&dr, arg, sizeof(dr)))
739 return -EFAULT;
740
741 hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
743 return -ENODEV;
744
745 switch (cmd) {
746 case HCISETAUTH:
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
749 break;
750
751 case HCISETENCRYPT:
752 if (!lmp_encrypt_capable(hdev)) {
753 err = -EOPNOTSUPP;
754 break;
755 }
756
757 if (!test_bit(HCI_AUTH, &hdev->flags)) {
758 /* Auth must be enabled first */
759 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
761 if (err)
762 break;
763 }
764
765 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
767 break;
768
769 case HCISETSCAN:
770 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
771 msecs_to_jiffies(HCI_INIT_TIMEOUT));
772 break;
773
774 case HCISETLINKPOL:
775 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
777 break;
778
779 case HCISETLINKMODE:
780 hdev->link_mode = ((__u16) dr.dev_opt) &
781 (HCI_LM_MASTER | HCI_LM_ACCEPT);
782 break;
783
784 case HCISETPTYPE:
785 hdev->pkt_type = (__u16) dr.dev_opt;
786 break;
787
788 case HCISETACLMTU:
789 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
790 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
791 break;
792
793 case HCISETSCOMTU:
794 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
795 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
796 break;
797
798 default:
799 err = -EINVAL;
800 break;
801 }
802
803 hci_dev_put(hdev);
804 return err;
805 }
806
807 int hci_get_dev_list(void __user *arg)
808 {
809 struct hci_dev *hdev;
810 struct hci_dev_list_req *dl;
811 struct hci_dev_req *dr;
812 int n = 0, size, err;
813 __u16 dev_num;
814
815 if (get_user(dev_num, (__u16 __user *) arg))
816 return -EFAULT;
817
818 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
819 return -EINVAL;
820
821 size = sizeof(*dl) + dev_num * sizeof(*dr);
822
823 dl = kzalloc(size, GFP_KERNEL);
824 if (!dl)
825 return -ENOMEM;
826
827 dr = dl->dev_req;
828
829 read_lock_bh(&hci_dev_list_lock);
830 list_for_each_entry(hdev, &hci_dev_list, list) {
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
832 cancel_delayed_work(&hdev->power_off);
833
834 if (!test_bit(HCI_MGMT, &hdev->flags))
835 set_bit(HCI_PAIRABLE, &hdev->flags);
836
837 (dr + n)->dev_id = hdev->id;
838 (dr + n)->dev_opt = hdev->flags;
839
840 if (++n >= dev_num)
841 break;
842 }
843 read_unlock_bh(&hci_dev_list_lock);
844
845 dl->dev_num = n;
846 size = sizeof(*dl) + n * sizeof(*dr);
847
848 err = copy_to_user(arg, dl, size);
849 kfree(dl);
850
851 return err ? -EFAULT : 0;
852 }
853
854 int hci_get_dev_info(void __user *arg)
855 {
856 struct hci_dev *hdev;
857 struct hci_dev_info di;
858 int err = 0;
859
860 if (copy_from_user(&di, arg, sizeof(di)))
861 return -EFAULT;
862
863 hdev = hci_dev_get(di.dev_id);
864 if (!hdev)
865 return -ENODEV;
866
867 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
868 cancel_delayed_work_sync(&hdev->power_off);
869
870 if (!test_bit(HCI_MGMT, &hdev->flags))
871 set_bit(HCI_PAIRABLE, &hdev->flags);
872
873 strcpy(di.name, hdev->name);
874 di.bdaddr = hdev->bdaddr;
875 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
876 di.flags = hdev->flags;
877 di.pkt_type = hdev->pkt_type;
878 di.acl_mtu = hdev->acl_mtu;
879 di.acl_pkts = hdev->acl_pkts;
880 di.sco_mtu = hdev->sco_mtu;
881 di.sco_pkts = hdev->sco_pkts;
882 di.link_policy = hdev->link_policy;
883 di.link_mode = hdev->link_mode;
884
885 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
886 memcpy(&di.features, &hdev->features, sizeof(di.features));
887
888 if (copy_to_user(arg, &di, sizeof(di)))
889 err = -EFAULT;
890
891 hci_dev_put(hdev);
892
893 return err;
894 }
895
896 /* ---- Interface to HCI drivers ---- */
897
898 static int hci_rfkill_set_block(void *data, bool blocked)
899 {
900 struct hci_dev *hdev = data;
901
902 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
903
904 if (!blocked)
905 return 0;
906
907 hci_dev_do_close(hdev);
908
909 return 0;
910 }
911
912 static const struct rfkill_ops hci_rfkill_ops = {
913 .set_block = hci_rfkill_set_block,
914 };
915
916 /* Alloc HCI device */
917 struct hci_dev *hci_alloc_dev(void)
918 {
919 struct hci_dev *hdev;
920
921 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
922 if (!hdev)
923 return NULL;
924
925 hci_init_sysfs(hdev);
926 skb_queue_head_init(&hdev->driver_init);
927
928 return hdev;
929 }
930 EXPORT_SYMBOL(hci_alloc_dev);
931
932 /* Free HCI device */
933 void hci_free_dev(struct hci_dev *hdev)
934 {
935 skb_queue_purge(&hdev->driver_init);
936
937 	/* freed via the device release callback */
938 put_device(&hdev->dev);
939 }
940 EXPORT_SYMBOL(hci_free_dev);
941
942 static void hci_power_on(struct work_struct *work)
943 {
944 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
945
946 BT_DBG("%s", hdev->name);
947
948 if (hci_dev_open(hdev->id) < 0)
949 return;
950
951 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
952 queue_delayed_work(hdev->workqueue, &hdev->power_off,
953 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
954
955 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
956 mgmt_index_added(hdev);
957 }
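/*
 * Editorial note: a newly registered controller comes up with both HCI_SETUP
 * and HCI_AUTO_OFF set (see hci_register_dev() below).  hci_power_on()
 * therefore schedules a delayed power_off after AUTO_OFF_TIMEOUT (2 seconds),
 * so a controller that userspace never touches is switched back off; paths
 * such as hci_get_dev_list() clear HCI_AUTO_OFF and cancel that work.
 */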
958
959 static void hci_power_off(struct work_struct *work)
960 {
961 struct hci_dev *hdev = container_of(work, struct hci_dev,
962 power_off.work);
963
964 BT_DBG("%s", hdev->name);
965
966 clear_bit(HCI_AUTO_OFF, &hdev->flags);
967
968 hci_dev_close(hdev->id);
969 }
970
971 static void hci_discov_off(struct work_struct *work)
972 {
973 struct hci_dev *hdev;
974 u8 scan = SCAN_PAGE;
975
976 hdev = container_of(work, struct hci_dev, discov_off.work);
977
978 BT_DBG("%s", hdev->name);
979
980 hci_dev_lock_bh(hdev);
981
982 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
983
984 hdev->discov_timeout = 0;
985
986 hci_dev_unlock_bh(hdev);
987 }
988
989 int hci_uuids_clear(struct hci_dev *hdev)
990 {
991 struct list_head *p, *n;
992
993 list_for_each_safe(p, n, &hdev->uuids) {
994 struct bt_uuid *uuid;
995
996 uuid = list_entry(p, struct bt_uuid, list);
997
998 list_del(p);
999 kfree(uuid);
1000 }
1001
1002 return 0;
1003 }
1004
1005 int hci_link_keys_clear(struct hci_dev *hdev)
1006 {
1007 struct list_head *p, *n;
1008
1009 list_for_each_safe(p, n, &hdev->link_keys) {
1010 struct link_key *key;
1011
1012 key = list_entry(p, struct link_key, list);
1013
1014 list_del(p);
1015 kfree(key);
1016 }
1017
1018 return 0;
1019 }
1020
1021 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1022 {
1023 struct link_key *k;
1024
1025 list_for_each_entry(k, &hdev->link_keys, list)
1026 if (bacmp(bdaddr, &k->bdaddr) == 0)
1027 return k;
1028
1029 return NULL;
1030 }
1031
1032 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1033 u8 key_type, u8 old_key_type)
1034 {
1035 /* Legacy key */
1036 if (key_type < 0x03)
1037 return 1;
1038
1039 /* Debug keys are insecure so don't store them persistently */
1040 if (key_type == HCI_LK_DEBUG_COMBINATION)
1041 return 0;
1042
1043 /* Changed combination key and there's no previous one */
1044 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1045 return 0;
1046
1047 /* Security mode 3 case */
1048 if (!conn)
1049 return 1;
1050
1051 /* Neither local nor remote side had no-bonding as requirement */
1052 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1053 return 1;
1054
1055 /* Local side had dedicated bonding as requirement */
1056 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1057 return 1;
1058
1059 /* Remote side had dedicated bonding as requirement */
1060 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1061 return 1;
1062
1063 /* If none of the above criteria match, then don't store the key
1064 * persistently */
1065 return 0;
1066 }
1067
1068 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1069 {
1070 struct link_key *k;
1071
1072 list_for_each_entry(k, &hdev->link_keys, list) {
1073 struct key_master_id *id;
1074
1075 if (k->type != HCI_LK_SMP_LTK)
1076 continue;
1077
1078 if (k->dlen != sizeof(*id))
1079 continue;
1080
1081 id = (void *) &k->data;
1082 if (id->ediv == ediv &&
1083 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1084 return k;
1085 }
1086
1087 return NULL;
1088 }
1089 EXPORT_SYMBOL(hci_find_ltk);
1090
1091 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1092 bdaddr_t *bdaddr, u8 type)
1093 {
1094 struct link_key *k;
1095
1096 list_for_each_entry(k, &hdev->link_keys, list)
1097 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1098 return k;
1099
1100 return NULL;
1101 }
1102 EXPORT_SYMBOL(hci_find_link_key_type);
1103
1104 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1105 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1106 {
1107 struct link_key *key, *old_key;
1108 u8 old_key_type, persistent;
1109
1110 old_key = hci_find_link_key(hdev, bdaddr);
1111 if (old_key) {
1112 old_key_type = old_key->type;
1113 key = old_key;
1114 } else {
1115 old_key_type = conn ? conn->key_type : 0xff;
1116 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1117 if (!key)
1118 return -ENOMEM;
1119 list_add(&key->list, &hdev->link_keys);
1120 }
1121
1122 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1123
1124 /* Some buggy controller combinations generate a changed
1125 * combination key for legacy pairing even when there's no
1126 * previous key */
1127 if (type == HCI_LK_CHANGED_COMBINATION &&
1128 (!conn || conn->remote_auth == 0xff) &&
1129 old_key_type == 0xff) {
1130 type = HCI_LK_COMBINATION;
1131 if (conn)
1132 conn->key_type = type;
1133 }
1134
1135 bacpy(&key->bdaddr, bdaddr);
1136 memcpy(key->val, val, 16);
1137 key->pin_len = pin_len;
1138
1139 if (type == HCI_LK_CHANGED_COMBINATION)
1140 key->type = old_key_type;
1141 else
1142 key->type = type;
1143
1144 if (!new_key)
1145 return 0;
1146
1147 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1148
1149 mgmt_new_link_key(hdev, key, persistent);
1150
1151 if (!persistent) {
1152 list_del(&key->list);
1153 kfree(key);
1154 }
1155
1156 return 0;
1157 }
1158
1159 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1160 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1161 {
1162 struct link_key *key, *old_key;
1163 struct key_master_id *id;
1164 u8 old_key_type;
1165
1166 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1167
1168 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1169 if (old_key) {
1170 key = old_key;
1171 old_key_type = old_key->type;
1172 } else {
1173 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1174 if (!key)
1175 return -ENOMEM;
1176 list_add(&key->list, &hdev->link_keys);
1177 old_key_type = 0xff;
1178 }
1179
1180 key->dlen = sizeof(*id);
1181
1182 bacpy(&key->bdaddr, bdaddr);
1183 memcpy(key->val, ltk, sizeof(key->val));
1184 key->type = HCI_LK_SMP_LTK;
1185 key->pin_len = key_size;
1186
1187 id = (void *) &key->data;
1188 id->ediv = ediv;
1189 memcpy(id->rand, rand, sizeof(id->rand));
1190
1191 if (new_key)
1192 mgmt_new_link_key(hdev, key, old_key_type);
1193
1194 return 0;
1195 }
1196
1197 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1198 {
1199 struct link_key *key;
1200
1201 key = hci_find_link_key(hdev, bdaddr);
1202 if (!key)
1203 return -ENOENT;
1204
1205 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1206
1207 list_del(&key->list);
1208 kfree(key);
1209
1210 return 0;
1211 }
1212
1213 /* HCI command timer function */
1214 static void hci_cmd_timer(unsigned long arg)
1215 {
1216 struct hci_dev *hdev = (void *) arg;
1217
1218 BT_ERR("%s command tx timeout", hdev->name);
1219 atomic_set(&hdev->cmd_cnt, 1);
1220 tasklet_schedule(&hdev->cmd_task);
1221 }
1222
1223 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1224 bdaddr_t *bdaddr)
1225 {
1226 struct oob_data *data;
1227
1228 list_for_each_entry(data, &hdev->remote_oob_data, list)
1229 if (bacmp(bdaddr, &data->bdaddr) == 0)
1230 return data;
1231
1232 return NULL;
1233 }
1234
1235 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1236 {
1237 struct oob_data *data;
1238
1239 data = hci_find_remote_oob_data(hdev, bdaddr);
1240 if (!data)
1241 return -ENOENT;
1242
1243 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1244
1245 list_del(&data->list);
1246 kfree(data);
1247
1248 return 0;
1249 }
1250
1251 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1252 {
1253 struct oob_data *data, *n;
1254
1255 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1256 list_del(&data->list);
1257 kfree(data);
1258 }
1259
1260 return 0;
1261 }
1262
1263 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1264 u8 *randomizer)
1265 {
1266 struct oob_data *data;
1267
1268 data = hci_find_remote_oob_data(hdev, bdaddr);
1269
1270 if (!data) {
1271 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1272 if (!data)
1273 return -ENOMEM;
1274
1275 bacpy(&data->bdaddr, bdaddr);
1276 list_add(&data->list, &hdev->remote_oob_data);
1277 }
1278
1279 memcpy(data->hash, hash, sizeof(data->hash));
1280 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1281
1282 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1283
1284 return 0;
1285 }
1286
1287 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1288 bdaddr_t *bdaddr)
1289 {
1290 struct bdaddr_list *b;
1291
1292 list_for_each_entry(b, &hdev->blacklist, list)
1293 if (bacmp(bdaddr, &b->bdaddr) == 0)
1294 return b;
1295
1296 return NULL;
1297 }
1298
1299 int hci_blacklist_clear(struct hci_dev *hdev)
1300 {
1301 struct list_head *p, *n;
1302
1303 list_for_each_safe(p, n, &hdev->blacklist) {
1304 struct bdaddr_list *b;
1305
1306 b = list_entry(p, struct bdaddr_list, list);
1307
1308 list_del(p);
1309 kfree(b);
1310 }
1311
1312 return 0;
1313 }
1314
1315 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1316 {
1317 struct bdaddr_list *entry;
1318
1319 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1320 return -EBADF;
1321
1322 if (hci_blacklist_lookup(hdev, bdaddr))
1323 return -EEXIST;
1324
1325 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1326 if (!entry)
1327 return -ENOMEM;
1328
1329 bacpy(&entry->bdaddr, bdaddr);
1330
1331 list_add(&entry->list, &hdev->blacklist);
1332
1333 return mgmt_device_blocked(hdev, bdaddr);
1334 }
1335
1336 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1337 {
1338 struct bdaddr_list *entry;
1339
1340 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1341 return hci_blacklist_clear(hdev);
1342 }
1343
1344 entry = hci_blacklist_lookup(hdev, bdaddr);
1345 if (!entry) {
1346 return -ENOENT;
1347 }
1348
1349 list_del(&entry->list);
1350 kfree(entry);
1351
1352 return mgmt_device_unblocked(hdev, bdaddr);
1353 }
1354
1355 static void hci_clear_adv_cache(unsigned long arg)
1356 {
1357 struct hci_dev *hdev = (void *) arg;
1358
1359 hci_dev_lock(hdev);
1360
1361 hci_adv_entries_clear(hdev);
1362
1363 hci_dev_unlock(hdev);
1364 }
1365
1366 int hci_adv_entries_clear(struct hci_dev *hdev)
1367 {
1368 struct adv_entry *entry, *tmp;
1369
1370 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1371 list_del(&entry->list);
1372 kfree(entry);
1373 }
1374
1375 BT_DBG("%s adv cache cleared", hdev->name);
1376
1377 return 0;
1378 }
1379
1380 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381 {
1382 struct adv_entry *entry;
1383
1384 list_for_each_entry(entry, &hdev->adv_entries, list)
1385 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1386 return entry;
1387
1388 return NULL;
1389 }
1390
1391 static inline int is_connectable_adv(u8 evt_type)
1392 {
1393 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1394 return 1;
1395
1396 return 0;
1397 }
1398
1399 int hci_add_adv_entry(struct hci_dev *hdev,
1400 struct hci_ev_le_advertising_info *ev)
1401 {
1402 struct adv_entry *entry;
1403
1404 if (!is_connectable_adv(ev->evt_type))
1405 return -EINVAL;
1406
1407 /* Only new entries should be added to adv_entries. So, if
1408 * bdaddr was found, don't add it. */
1409 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1410 return 0;
1411
1412 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1413 if (!entry)
1414 return -ENOMEM;
1415
1416 bacpy(&entry->bdaddr, &ev->bdaddr);
1417 entry->bdaddr_type = ev->bdaddr_type;
1418
1419 list_add(&entry->list, &hdev->adv_entries);
1420
1421 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1422 batostr(&entry->bdaddr), entry->bdaddr_type);
1423
1424 return 0;
1425 }
1426
1427 /* Register HCI device */
1428 int hci_register_dev(struct hci_dev *hdev)
1429 {
1430 struct list_head *head = &hci_dev_list, *p;
1431 int i, id, error;
1432
1433 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1434 hdev->bus, hdev->owner);
1435
1436 if (!hdev->open || !hdev->close || !hdev->destruct)
1437 return -EINVAL;
1438
1439 /* Do not allow HCI_AMP devices to register at index 0,
1440 * so the index can be used as the AMP controller ID.
1441 */
1442 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1443
1444 write_lock_bh(&hci_dev_list_lock);
1445
1446 /* Find first available device id */
1447 list_for_each(p, &hci_dev_list) {
1448 if (list_entry(p, struct hci_dev, list)->id != id)
1449 break;
1450 head = p; id++;
1451 }
1452
1453 sprintf(hdev->name, "hci%d", id);
1454 hdev->id = id;
1455 list_add(&hdev->list, head);
1456
1457 atomic_set(&hdev->refcnt, 1);
1458 spin_lock_init(&hdev->lock);
1459
1460 hdev->flags = 0;
1461 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1462 hdev->esco_type = (ESCO_HV1);
1463 hdev->link_mode = (HCI_LM_ACCEPT);
1464 hdev->io_capability = 0x03; /* No Input No Output */
1465
1466 hdev->idle_timeout = 0;
1467 hdev->sniff_max_interval = 800;
1468 hdev->sniff_min_interval = 80;
1469
1470 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1471 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1472 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1473
1474 skb_queue_head_init(&hdev->rx_q);
1475 skb_queue_head_init(&hdev->cmd_q);
1476 skb_queue_head_init(&hdev->raw_q);
1477
1478 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1479
1480 for (i = 0; i < NUM_REASSEMBLY; i++)
1481 hdev->reassembly[i] = NULL;
1482
1483 init_waitqueue_head(&hdev->req_wait_q);
1484 mutex_init(&hdev->req_lock);
1485
1486 inquiry_cache_init(hdev);
1487
1488 hci_conn_hash_init(hdev);
1489
1490 INIT_LIST_HEAD(&hdev->mgmt_pending);
1491
1492 INIT_LIST_HEAD(&hdev->blacklist);
1493
1494 INIT_LIST_HEAD(&hdev->uuids);
1495
1496 INIT_LIST_HEAD(&hdev->link_keys);
1497
1498 INIT_LIST_HEAD(&hdev->remote_oob_data);
1499
1500 INIT_LIST_HEAD(&hdev->adv_entries);
1501 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1502 (unsigned long) hdev);
1503
1504 INIT_WORK(&hdev->power_on, hci_power_on);
1505 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1506
1507 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1508
1509 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1510
1511 atomic_set(&hdev->promisc, 0);
1512
1513 write_unlock_bh(&hci_dev_list_lock);
1514
1515 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1516 if (!hdev->workqueue) {
1517 error = -ENOMEM;
1518 goto err;
1519 }
1520
1521 error = hci_add_sysfs(hdev);
1522 if (error < 0)
1523 goto err_wqueue;
1524
1525 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1526 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1527 if (hdev->rfkill) {
1528 if (rfkill_register(hdev->rfkill) < 0) {
1529 rfkill_destroy(hdev->rfkill);
1530 hdev->rfkill = NULL;
1531 }
1532 }
1533
1534 set_bit(HCI_AUTO_OFF, &hdev->flags);
1535 set_bit(HCI_SETUP, &hdev->flags);
1536 queue_work(hdev->workqueue, &hdev->power_on);
1537
1538 hci_notify(hdev, HCI_DEV_REG);
1539
1540 return id;
1541
1542 err_wqueue:
1543 destroy_workqueue(hdev->workqueue);
1544 err:
1545 write_lock_bh(&hci_dev_list_lock);
1546 list_del(&hdev->list);
1547 write_unlock_bh(&hci_dev_list_lock);
1548
1549 return error;
1550 }
1551 EXPORT_SYMBOL(hci_register_dev);
1552
1553 /* Unregister HCI device */
1554 void hci_unregister_dev(struct hci_dev *hdev)
1555 {
1556 int i;
1557
1558 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1559
1560 write_lock_bh(&hci_dev_list_lock);
1561 list_del(&hdev->list);
1562 write_unlock_bh(&hci_dev_list_lock);
1563
1564 hci_dev_do_close(hdev);
1565
1566 for (i = 0; i < NUM_REASSEMBLY; i++)
1567 kfree_skb(hdev->reassembly[i]);
1568
1569 if (!test_bit(HCI_INIT, &hdev->flags) &&
1570 !test_bit(HCI_SETUP, &hdev->flags)) {
1571 hci_dev_lock_bh(hdev);
1572 mgmt_index_removed(hdev);
1573 hci_dev_unlock_bh(hdev);
1574 }
1575
1576 /* mgmt_index_removed should take care of emptying the
1577 * pending list */
1578 BUG_ON(!list_empty(&hdev->mgmt_pending));
1579
1580 hci_notify(hdev, HCI_DEV_UNREG);
1581
1582 if (hdev->rfkill) {
1583 rfkill_unregister(hdev->rfkill);
1584 rfkill_destroy(hdev->rfkill);
1585 }
1586
1587 hci_del_sysfs(hdev);
1588
1589 del_timer(&hdev->adv_timer);
1590
1591 destroy_workqueue(hdev->workqueue);
1592
1593 hci_dev_lock_bh(hdev);
1594 hci_blacklist_clear(hdev);
1595 hci_uuids_clear(hdev);
1596 hci_link_keys_clear(hdev);
1597 hci_remote_oob_data_clear(hdev);
1598 hci_adv_entries_clear(hdev);
1599 hci_dev_unlock_bh(hdev);
1600
1601 __hci_dev_put(hdev);
1602 }
1603 EXPORT_SYMBOL(hci_unregister_dev);
1604
1605 /* Suspend HCI device */
1606 int hci_suspend_dev(struct hci_dev *hdev)
1607 {
1608 hci_notify(hdev, HCI_DEV_SUSPEND);
1609 return 0;
1610 }
1611 EXPORT_SYMBOL(hci_suspend_dev);
1612
1613 /* Resume HCI device */
1614 int hci_resume_dev(struct hci_dev *hdev)
1615 {
1616 hci_notify(hdev, HCI_DEV_RESUME);
1617 return 0;
1618 }
1619 EXPORT_SYMBOL(hci_resume_dev);
1620
1621 /* Receive frame from HCI drivers */
1622 int hci_recv_frame(struct sk_buff *skb)
1623 {
1624 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1625 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1626 && !test_bit(HCI_INIT, &hdev->flags))) {
1627 kfree_skb(skb);
1628 return -ENXIO;
1629 }
1630
1631 	/* Incoming skb */
1632 bt_cb(skb)->incoming = 1;
1633
1634 /* Time stamp */
1635 __net_timestamp(skb);
1636
1637 /* Queue frame for rx task */
1638 skb_queue_tail(&hdev->rx_q, skb);
1639 tasklet_schedule(&hdev->rx_task);
1640
1641 return 0;
1642 }
1643 EXPORT_SYMBOL(hci_recv_frame);
1644
1645 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1646 int count, __u8 index)
1647 {
1648 int len = 0;
1649 int hlen = 0;
1650 int remain = count;
1651 struct sk_buff *skb;
1652 struct bt_skb_cb *scb;
1653
1654 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1655 index >= NUM_REASSEMBLY)
1656 return -EILSEQ;
1657
1658 skb = hdev->reassembly[index];
1659
1660 if (!skb) {
1661 switch (type) {
1662 case HCI_ACLDATA_PKT:
1663 len = HCI_MAX_FRAME_SIZE;
1664 hlen = HCI_ACL_HDR_SIZE;
1665 break;
1666 case HCI_EVENT_PKT:
1667 len = HCI_MAX_EVENT_SIZE;
1668 hlen = HCI_EVENT_HDR_SIZE;
1669 break;
1670 case HCI_SCODATA_PKT:
1671 len = HCI_MAX_SCO_SIZE;
1672 hlen = HCI_SCO_HDR_SIZE;
1673 break;
1674 }
1675
1676 skb = bt_skb_alloc(len, GFP_ATOMIC);
1677 if (!skb)
1678 return -ENOMEM;
1679
1680 scb = (void *) skb->cb;
1681 scb->expect = hlen;
1682 scb->pkt_type = type;
1683
1684 skb->dev = (void *) hdev;
1685 hdev->reassembly[index] = skb;
1686 }
1687
1688 while (count) {
1689 scb = (void *) skb->cb;
1690 len = min(scb->expect, (__u16)count);
1691
1692 memcpy(skb_put(skb, len), data, len);
1693
1694 count -= len;
1695 data += len;
1696 scb->expect -= len;
1697 remain = count;
1698
1699 switch (type) {
1700 case HCI_EVENT_PKT:
1701 if (skb->len == HCI_EVENT_HDR_SIZE) {
1702 struct hci_event_hdr *h = hci_event_hdr(skb);
1703 scb->expect = h->plen;
1704
1705 if (skb_tailroom(skb) < scb->expect) {
1706 kfree_skb(skb);
1707 hdev->reassembly[index] = NULL;
1708 return -ENOMEM;
1709 }
1710 }
1711 break;
1712
1713 case HCI_ACLDATA_PKT:
1714 if (skb->len == HCI_ACL_HDR_SIZE) {
1715 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1716 scb->expect = __le16_to_cpu(h->dlen);
1717
1718 if (skb_tailroom(skb) < scb->expect) {
1719 kfree_skb(skb);
1720 hdev->reassembly[index] = NULL;
1721 return -ENOMEM;
1722 }
1723 }
1724 break;
1725
1726 case HCI_SCODATA_PKT:
1727 if (skb->len == HCI_SCO_HDR_SIZE) {
1728 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1729 scb->expect = h->dlen;
1730
1731 if (skb_tailroom(skb) < scb->expect) {
1732 kfree_skb(skb);
1733 hdev->reassembly[index] = NULL;
1734 return -ENOMEM;
1735 }
1736 }
1737 break;
1738 }
1739
1740 if (scb->expect == 0) {
1741 /* Complete frame */
1742
1743 bt_cb(skb)->pkt_type = type;
1744 hci_recv_frame(skb);
1745
1746 hdev->reassembly[index] = NULL;
1747 return remain;
1748 }
1749 }
1750
1751 return remain;
1752 }
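/*
 * Editorial note: hci_reassembly() consumes as much of @data as fits into the
 * frame currently being rebuilt and returns the number of bytes it did NOT
 * consume (or a negative errno).  hci_recv_fragment() below relies on this:
 *
 *	rem = hci_reassembly(hdev, type, data, count, type - 1);
 *	data += (count - rem);
 *	count = rem;
 *
 * so a driver may hand over arbitrarily sized chunks of a packet.
 */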
1753
1754 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1755 {
1756 int rem = 0;
1757
1758 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1759 return -EILSEQ;
1760
1761 while (count) {
1762 rem = hci_reassembly(hdev, type, data, count, type - 1);
1763 if (rem < 0)
1764 return rem;
1765
1766 data += (count - rem);
1767 count = rem;
1768 }
1769
1770 return rem;
1771 }
1772 EXPORT_SYMBOL(hci_recv_fragment);
1773
1774 #define STREAM_REASSEMBLY 0
1775
1776 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1777 {
1778 int type;
1779 int rem = 0;
1780
1781 while (count) {
1782 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1783
1784 if (!skb) {
1785 struct { char type; } *pkt;
1786
1787 /* Start of the frame */
1788 pkt = data;
1789 type = pkt->type;
1790
1791 data++;
1792 count--;
1793 } else
1794 type = bt_cb(skb)->pkt_type;
1795
1796 rem = hci_reassembly(hdev, type, data, count,
1797 STREAM_REASSEMBLY);
1798 if (rem < 0)
1799 return rem;
1800
1801 data += (count - rem);
1802 count = rem;
1803 }
1804
1805 return rem;
1806 }
1807 EXPORT_SYMBOL(hci_recv_stream_fragment);
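/*
 * Editorial note: hci_recv_stream_fragment() is meant for drivers that deliver
 * a raw byte stream in which each frame starts with a one-byte packet type
 * indicator (UART/H4-style framing, as an assumption about the typical
 * caller); that first byte selects the type and the remainder is fed through
 * hci_reassembly() using the single STREAM_REASSEMBLY slot.
 */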
1808
1809 /* ---- Interface to upper protocols ---- */
1810
1811 /* Register/Unregister protocols.
1812 * hci_task_lock is used to ensure that no tasks are running. */
1813 int hci_register_proto(struct hci_proto *hp)
1814 {
1815 int err = 0;
1816
1817 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1818
1819 if (hp->id >= HCI_MAX_PROTO)
1820 return -EINVAL;
1821
1822 write_lock_bh(&hci_task_lock);
1823
1824 if (!hci_proto[hp->id])
1825 hci_proto[hp->id] = hp;
1826 else
1827 err = -EEXIST;
1828
1829 write_unlock_bh(&hci_task_lock);
1830
1831 return err;
1832 }
1833 EXPORT_SYMBOL(hci_register_proto);
1834
1835 int hci_unregister_proto(struct hci_proto *hp)
1836 {
1837 int err = 0;
1838
1839 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1840
1841 if (hp->id >= HCI_MAX_PROTO)
1842 return -EINVAL;
1843
1844 write_lock_bh(&hci_task_lock);
1845
1846 if (hci_proto[hp->id])
1847 hci_proto[hp->id] = NULL;
1848 else
1849 err = -ENOENT;
1850
1851 write_unlock_bh(&hci_task_lock);
1852
1853 return err;
1854 }
1855 EXPORT_SYMBOL(hci_unregister_proto);
1856
1857 int hci_register_cb(struct hci_cb *cb)
1858 {
1859 BT_DBG("%p name %s", cb, cb->name);
1860
1861 write_lock_bh(&hci_cb_list_lock);
1862 list_add(&cb->list, &hci_cb_list);
1863 write_unlock_bh(&hci_cb_list_lock);
1864
1865 return 0;
1866 }
1867 EXPORT_SYMBOL(hci_register_cb);
1868
1869 int hci_unregister_cb(struct hci_cb *cb)
1870 {
1871 BT_DBG("%p name %s", cb, cb->name);
1872
1873 write_lock_bh(&hci_cb_list_lock);
1874 list_del(&cb->list);
1875 write_unlock_bh(&hci_cb_list_lock);
1876
1877 return 0;
1878 }
1879 EXPORT_SYMBOL(hci_unregister_cb);
1880
1881 static int hci_send_frame(struct sk_buff *skb)
1882 {
1883 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1884
1885 if (!hdev) {
1886 kfree_skb(skb);
1887 return -ENODEV;
1888 }
1889
1890 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1891
1892 if (atomic_read(&hdev->promisc)) {
1893 /* Time stamp */
1894 __net_timestamp(skb);
1895
1896 hci_send_to_sock(hdev, skb, NULL);
1897 }
1898
1899 /* Get rid of skb owner, prior to sending to the driver. */
1900 skb_orphan(skb);
1901
1902 return hdev->send(skb);
1903 }
1904
1905 /* Send HCI command */
1906 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1907 {
1908 int len = HCI_COMMAND_HDR_SIZE + plen;
1909 struct hci_command_hdr *hdr;
1910 struct sk_buff *skb;
1911
1912 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1913
1914 skb = bt_skb_alloc(len, GFP_ATOMIC);
1915 if (!skb) {
1916 BT_ERR("%s no memory for command", hdev->name);
1917 return -ENOMEM;
1918 }
1919
1920 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1921 hdr->opcode = cpu_to_le16(opcode);
1922 hdr->plen = plen;
1923
1924 if (plen)
1925 memcpy(skb_put(skb, plen), param, plen);
1926
1927 BT_DBG("skb len %d", skb->len);
1928
1929 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1930 skb->dev = (void *) hdev;
1931
1932 if (test_bit(HCI_INIT, &hdev->flags))
1933 hdev->init_last_cmd = opcode;
1934
1935 skb_queue_tail(&hdev->cmd_q, skb);
1936 tasklet_schedule(&hdev->cmd_task);
1937
1938 return 0;
1939 }
1940
1941 /* Get data from the previously sent command */
1942 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1943 {
1944 struct hci_command_hdr *hdr;
1945
1946 if (!hdev->sent_cmd)
1947 return NULL;
1948
1949 hdr = (void *) hdev->sent_cmd->data;
1950
1951 if (hdr->opcode != cpu_to_le16(opcode))
1952 return NULL;
1953
1954 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1955
1956 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1957 }
1958
1959 /* Send ACL data */
1960 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1961 {
1962 struct hci_acl_hdr *hdr;
1963 int len = skb->len;
1964
1965 skb_push(skb, HCI_ACL_HDR_SIZE);
1966 skb_reset_transport_header(skb);
1967 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1968 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1969 hdr->dlen = cpu_to_le16(len);
1970 }
1971
1972 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1973 struct sk_buff *skb, __u16 flags)
1974 {
1975 struct hci_dev *hdev = conn->hdev;
1976 struct sk_buff *list;
1977
1978 list = skb_shinfo(skb)->frag_list;
1979 if (!list) {
1980 /* Non fragmented */
1981 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1982
1983 skb_queue_tail(queue, skb);
1984 } else {
1985 /* Fragmented */
1986 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1987
1988 skb_shinfo(skb)->frag_list = NULL;
1989
1990 /* Queue all fragments atomically */
1991 spin_lock_bh(&queue->lock);
1992
1993 __skb_queue_tail(queue, skb);
1994
1995 flags &= ~ACL_START;
1996 flags |= ACL_CONT;
1997 do {
1998 skb = list; list = list->next;
1999
2000 skb->dev = (void *) hdev;
2001 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2002 hci_add_acl_hdr(skb, conn->handle, flags);
2003
2004 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2005
2006 __skb_queue_tail(queue, skb);
2007 } while (list);
2008
2009 spin_unlock_bh(&queue->lock);
2010 }
2011 }
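/*
 * Editorial note: for a fragmented skb the head fragment keeps the flags it
 * was queued with (typically ACL_START), while every fragment on frag_list is
 * re-tagged with ACL_CONT before being queued, and the whole list is added
 * under the queue lock so the TX scheduler never sees a partially queued
 * frame.
 */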
2012
2013 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2014 {
2015 struct hci_conn *conn = chan->conn;
2016 struct hci_dev *hdev = conn->hdev;
2017
2018 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2019
2020 skb->dev = (void *) hdev;
2021 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2022 hci_add_acl_hdr(skb, conn->handle, flags);
2023
2024 hci_queue_acl(conn, &chan->data_q, skb, flags);
2025
2026 tasklet_schedule(&hdev->tx_task);
2027 }
2028 EXPORT_SYMBOL(hci_send_acl);
2029
2030 /* Send SCO data */
2031 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2032 {
2033 struct hci_dev *hdev = conn->hdev;
2034 struct hci_sco_hdr hdr;
2035
2036 BT_DBG("%s len %d", hdev->name, skb->len);
2037
2038 hdr.handle = cpu_to_le16(conn->handle);
2039 hdr.dlen = skb->len;
2040
2041 skb_push(skb, HCI_SCO_HDR_SIZE);
2042 skb_reset_transport_header(skb);
2043 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2044
2045 skb->dev = (void *) hdev;
2046 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2047
2048 skb_queue_tail(&conn->data_q, skb);
2049 tasklet_schedule(&hdev->tx_task);
2050 }
2051 EXPORT_SYMBOL(hci_send_sco);
2052
2053 /* ---- HCI TX task (outgoing data) ---- */
2054
2055 /* HCI Connection scheduler */
2056 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2057 {
2058 struct hci_conn_hash *h = &hdev->conn_hash;
2059 struct hci_conn *conn = NULL, *c;
2060 int num = 0, min = ~0;
2061
2062 /* We don't have to lock device here. Connections are always
2063 * added and removed with TX task disabled. */
2064 list_for_each_entry(c, &h->list, list) {
2065 if (c->type != type || skb_queue_empty(&c->data_q))
2066 continue;
2067
2068 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2069 continue;
2070
2071 num++;
2072
2073 if (c->sent < min) {
2074 min = c->sent;
2075 conn = c;
2076 }
2077
2078 if (hci_conn_num(hdev, type) == num)
2079 break;
2080 }
2081
2082 if (conn) {
2083 int cnt, q;
2084
2085 switch (conn->type) {
2086 case ACL_LINK:
2087 cnt = hdev->acl_cnt;
2088 break;
2089 case SCO_LINK:
2090 case ESCO_LINK:
2091 cnt = hdev->sco_cnt;
2092 break;
2093 case LE_LINK:
2094 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2095 break;
2096 default:
2097 cnt = 0;
2098 BT_ERR("Unknown link type");
2099 }
2100
2101 q = cnt / num;
2102 *quote = q ? q : 1;
2103 } else
2104 *quote = 0;
2105
2106 BT_DBG("conn %p quote %d", conn, *quote);
2107 return conn;
2108 }
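/*
 * Editorial note: hci_low_sent() picks, among connections of @type that have
 * queued data, the one with the fewest packets in flight (conn->sent) and
 * grants it a quote of roughly (available controller buffers) / (number of
 * such connections), but never less than 1, so every ready connection makes
 * progress in each scheduling round.
 */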
2109
2110 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2111 {
2112 struct hci_conn_hash *h = &hdev->conn_hash;
2113 struct hci_conn *c;
2114
2115 BT_ERR("%s link tx timeout", hdev->name);
2116
2117 /* Kill stalled connections */
2118 list_for_each_entry(c, &h->list, list) {
2119 if (c->type == type && c->sent) {
2120 BT_ERR("%s killing stalled connection %s",
2121 hdev->name, batostr(&c->dst));
2122 hci_acl_disconn(c, 0x13);
2123 }
2124 }
2125 }
2126
2127 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2128 int *quote)
2129 {
2130 struct hci_conn_hash *h = &hdev->conn_hash;
2131 struct hci_chan *chan = NULL;
2132 int num = 0, min = ~0, cur_prio = 0;
2133 struct hci_conn *conn;
2134 int cnt, q, conn_num = 0;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 list_for_each_entry(conn, &h->list, list) {
2139 struct hci_chan_hash *ch;
2140 struct hci_chan *tmp;
2141
2142 if (conn->type != type)
2143 continue;
2144
2145 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2146 continue;
2147
2148 conn_num++;
2149
2150 ch = &conn->chan_hash;
2151
2152 list_for_each_entry(tmp, &ch->list, list) {
2153 struct sk_buff *skb;
2154
2155 if (skb_queue_empty(&tmp->data_q))
2156 continue;
2157
2158 skb = skb_peek(&tmp->data_q);
2159 if (skb->priority < cur_prio)
2160 continue;
2161
2162 if (skb->priority > cur_prio) {
2163 num = 0;
2164 min = ~0;
2165 cur_prio = skb->priority;
2166 }
2167
2168 num++;
2169
2170 if (conn->sent < min) {
2171 min = conn->sent;
2172 chan = tmp;
2173 }
2174 }
2175
2176 if (hci_conn_num(hdev, type) == conn_num)
2177 break;
2178 }
2179
2180 if (!chan)
2181 return NULL;
2182
2183 switch (chan->conn->type) {
2184 case ACL_LINK:
2185 cnt = hdev->acl_cnt;
2186 break;
2187 case SCO_LINK:
2188 case ESCO_LINK:
2189 cnt = hdev->sco_cnt;
2190 break;
2191 case LE_LINK:
2192 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2193 break;
2194 default:
2195 cnt = 0;
2196 BT_ERR("Unknown link type");
2197 }
2198
2199 q = cnt / num;
2200 *quote = q ? q : 1;
2201 BT_DBG("chan %p quote %d", chan, *quote);
2202 return chan;
2203 }
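/*
 * Editorial note: hci_chan_sent() works like hci_low_sent() but at channel
 * granularity and is priority aware: only channels whose head skb carries the
 * highest priority currently queued are considered, and among those the
 * connection with the fewest packets in flight is preferred.
 */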
2204
2205 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2206 {
2207 struct hci_conn_hash *h = &hdev->conn_hash;
2208 struct hci_conn *conn;
2209 int num = 0;
2210
2211 BT_DBG("%s", hdev->name);
2212
2213 list_for_each_entry(conn, &h->list, list) {
2214 struct hci_chan_hash *ch;
2215 struct hci_chan *chan;
2216
2217 if (conn->type != type)
2218 continue;
2219
2220 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221 continue;
2222
2223 num++;
2224
2225 ch = &conn->chan_hash;
2226 list_for_each_entry(chan, &ch->list, list) {
2227 struct sk_buff *skb;
2228
2229 if (chan->sent) {
2230 chan->sent = 0;
2231 continue;
2232 }
2233
2234 if (skb_queue_empty(&chan->data_q))
2235 continue;
2236
2237 skb = skb_peek(&chan->data_q);
2238 if (skb->priority >= HCI_PRIO_MAX - 1)
2239 continue;
2240
2241 skb->priority = HCI_PRIO_MAX - 1;
2242
2243 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2244 skb->priority);
2245 }
2246
2247 if (hci_conn_num(hdev, type) == num)
2248 break;
2249 }
2250 }
2251
2252 static inline void hci_sched_acl(struct hci_dev *hdev)
2253 {
2254 struct hci_chan *chan;
2255 struct sk_buff *skb;
2256 int quote;
2257 unsigned int cnt;
2258
2259 BT_DBG("%s", hdev->name);
2260
2261 if (!hci_conn_num(hdev, ACL_LINK))
2262 return;
2263
2264 if (!test_bit(HCI_RAW, &hdev->flags)) {
2265 /* ACL tx timeout must be longer than maximum
2266 * link supervision timeout (40.9 seconds) */
2267 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2268 hci_link_tx_to(hdev, ACL_LINK);
2269 }
2270
2271 cnt = hdev->acl_cnt;
2272
2273 while (hdev->acl_cnt &&
2274 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2275 u32 priority = (skb_peek(&chan->data_q))->priority;
2276 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2277 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278 skb->len, skb->priority);
2279
2280 /* Stop if priority has changed */
2281 if (skb->priority < priority)
2282 break;
2283
2284 skb = skb_dequeue(&chan->data_q);
2285
2286 hci_conn_enter_active_mode(chan->conn,
2287 bt_cb(skb)->force_active);
2288
2289 hci_send_frame(skb);
2290 hdev->acl_last_tx = jiffies;
2291
2292 hdev->acl_cnt--;
2293 chan->sent++;
2294 chan->conn->sent++;
2295 }
2296 }
2297
2298 if (cnt != hdev->acl_cnt)
2299 hci_prio_recalculate(hdev, ACL_LINK);
2300 }
2301
2302 /* Schedule SCO */
2303 static inline void hci_sched_sco(struct hci_dev *hdev)
2304 {
2305 struct hci_conn *conn;
2306 struct sk_buff *skb;
2307 int quote;
2308
2309 BT_DBG("%s", hdev->name);
2310
2311 if (!hci_conn_num(hdev, SCO_LINK))
2312 return;
2313
2314 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2315 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2316 BT_DBG("skb %p len %d", skb, skb->len);
2317 hci_send_frame(skb);
2318
2319 conn->sent++;
2320 if (conn->sent == ~0)
2321 conn->sent = 0;
2322 }
2323 }
2324 }
2325
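/* Schedule eSCO (shares the SCO buffer count, hdev->sco_cnt) */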
2326 static inline void hci_sched_esco(struct hci_dev *hdev)
2327 {
2328 struct hci_conn *conn;
2329 struct sk_buff *skb;
2330 int quote;
2331
2332 BT_DBG("%s", hdev->name);
2333
2334 if (!hci_conn_num(hdev, ESCO_LINK))
2335 return;
2336
2337 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339 BT_DBG("skb %p len %d", skb, skb->len);
2340 hci_send_frame(skb);
2341
2342 conn->sent++;
2343 if (conn->sent == ~0)
2344 conn->sent = 0;
2345 }
2346 }
2347 }
2348
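/* Schedule LE: same priority-ordered draining as ACL, using the LE
 * buffer pool (hdev->le_cnt) when the controller reports separate LE
 * buffers and falling back to the ACL pool otherwise */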
2349 static inline void hci_sched_le(struct hci_dev *hdev)
2350 {
2351 struct hci_chan *chan;
2352 struct sk_buff *skb;
2353 int quote, cnt, tmp;
2354
2355 BT_DBG("%s", hdev->name);
2356
2357 if (!hci_conn_num(hdev, LE_LINK))
2358 return;
2359
2360 if (!test_bit(HCI_RAW, &hdev->flags)) {
2361 /* LE tx timeout must be longer than maximum
2362 * link supervision timeout (40.9 seconds) */
2363 if (!hdev->le_cnt && hdev->le_pkts &&
2364 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2365 hci_link_tx_to(hdev, LE_LINK);
2366 }
2367
2368 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2369 tmp = cnt;
2370 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2371 u32 priority = (skb_peek(&chan->data_q))->priority;
2372 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2373 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2374 skb->len, skb->priority);
2375
2376 /* Stop if priority has changed */
2377 if (skb->priority < priority)
2378 break;
2379
2380 skb = skb_dequeue(&chan->data_q);
2381
2382 hci_send_frame(skb);
2383 hdev->le_last_tx = jiffies;
2384
2385 cnt--;
2386 chan->sent++;
2387 chan->conn->sent++;
2388 }
2389 }
2390
2391 if (hdev->le_pkts)
2392 hdev->le_cnt = cnt;
2393 else
2394 hdev->acl_cnt = cnt;
2395
2396 if (cnt != tmp)
2397 hci_prio_recalculate(hdev, LE_LINK);
2398 }
2399
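/* TX tasklet: run the per-link-type schedulers, then flush any raw
 * (unknown type) packets queued on hdev->raw_q */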
2400 static void hci_tx_task(unsigned long arg)
2401 {
2402 struct hci_dev *hdev = (struct hci_dev *) arg;
2403 struct sk_buff *skb;
2404
2405 read_lock(&hci_task_lock);
2406
2407 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2408 hdev->sco_cnt, hdev->le_cnt);
2409
2410 /* Schedule queues and send pending frames to the HCI driver */
2411
2412 hci_sched_acl(hdev);
2413
2414 hci_sched_sco(hdev);
2415
2416 hci_sched_esco(hdev);
2417
2418 hci_sched_le(hdev);
2419
2420 /* Send next queued raw (unknown type) packet */
2421 while ((skb = skb_dequeue(&hdev->raw_q)))
2422 hci_send_frame(skb);
2423
2424 read_unlock(&hci_task_lock);
2425 }
2426
2427 /* ----- HCI RX task (incoming data processing) ----- */
2428
2429 /* ACL data packet */
2430 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2431 {
2432 struct hci_acl_hdr *hdr = (void *) skb->data;
2433 struct hci_conn *conn;
2434 __u16 handle, flags;
2435
2436 skb_pull(skb, HCI_ACL_HDR_SIZE);
2437
2438 handle = __le16_to_cpu(hdr->handle);
2439 flags = hci_flags(handle);
2440 handle = hci_handle(handle);
2441
2442 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2443
2444 hdev->stat.acl_rx++;
2445
2446 hci_dev_lock(hdev);
2447 conn = hci_conn_hash_lookup_handle(hdev, handle);
2448 hci_dev_unlock(hdev);
2449
2450 if (conn) {
2451 register struct hci_proto *hp;
2452
2453 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2454
2455 /* Send to upper protocol */
2456 hp = hci_proto[HCI_PROTO_L2CAP];
2457 if (hp && hp->recv_acldata) {
2458 hp->recv_acldata(conn, skb, flags);
2459 return;
2460 }
2461 } else {
2462 BT_ERR("%s ACL packet for unknown connection handle %d",
2463 hdev->name, handle);
2464 }
2465
2466 kfree_skb(skb);
2467 }
2468
2469 /* SCO data packet */
2470 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2471 {
2472 struct hci_sco_hdr *hdr = (void *) skb->data;
2473 struct hci_conn *conn;
2474 __u16 handle;
2475
2476 skb_pull(skb, HCI_SCO_HDR_SIZE);
2477
2478 handle = __le16_to_cpu(hdr->handle);
2479
2480 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2481
2482 hdev->stat.sco_rx++;
2483
2484 hci_dev_lock(hdev);
2485 conn = hci_conn_hash_lookup_handle(hdev, handle);
2486 hci_dev_unlock(hdev);
2487
2488 if (conn) {
2489 register struct hci_proto *hp;
2490
2491 /* Send to upper protocol */
2492 hp = hci_proto[HCI_PROTO_SCO];
2493 if (hp && hp->recv_scodata) {
2494 hp->recv_scodata(conn, skb);
2495 return;
2496 }
2497 } else {
2498 BT_ERR("%s SCO packet for unknown connection handle %d",
2499 hdev->name, handle);
2500 }
2501
2502 kfree_skb(skb);
2503 }
2504
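/* RX tasklet: hand each queued frame to listening sockets when in
 * promiscuous mode, then dispatch events, ACL and SCO data to their
 * handlers. Raw-mode devices get no further processing, and data
 * packets are discarded while HCI_INIT is set */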
2505 static void hci_rx_task(unsigned long arg)
2506 {
2507 struct hci_dev *hdev = (struct hci_dev *) arg;
2508 struct sk_buff *skb;
2509
2510 BT_DBG("%s", hdev->name);
2511
2512 read_lock(&hci_task_lock);
2513
2514 while ((skb = skb_dequeue(&hdev->rx_q))) {
2515 if (atomic_read(&hdev->promisc)) {
2516 /* Send a copy to the listening sockets */
2517 hci_send_to_sock(hdev, skb, NULL);
2518 }
2519
2520 if (test_bit(HCI_RAW, &hdev->flags)) {
2521 kfree_skb(skb);
2522 continue;
2523 }
2524
2525 if (test_bit(HCI_INIT, &hdev->flags)) {
2526 /* Don't process data packets in this state. */
2527 switch (bt_cb(skb)->pkt_type) {
2528 case HCI_ACLDATA_PKT:
2529 case HCI_SCODATA_PKT:
2530 kfree_skb(skb);
2531 continue;
2532 }
2533 }
2534
2535 /* Process frame */
2536 switch (bt_cb(skb)->pkt_type) {
2537 case HCI_EVENT_PKT:
2538 hci_event_packet(hdev, skb);
2539 break;
2540
2541 case HCI_ACLDATA_PKT:
2542 BT_DBG("%s ACL data packet", hdev->name);
2543 hci_acldata_packet(hdev, skb);
2544 break;
2545
2546 case HCI_SCODATA_PKT:
2547 BT_DBG("%s SCO data packet", hdev->name);
2548 hci_scodata_packet(hdev, skb);
2549 break;
2550
2551 default:
2552 kfree_skb(skb);
2553 break;
2554 }
2555 }
2556
2557 read_unlock(&hci_task_lock);
2558 }
2559
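/* Command tasklet: send the next queued command when the controller has
 * command credits, keep a clone in hdev->sent_cmd for the completion
 * handler and (re)arm the command timeout unless a reset is in flight */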
2560 static void hci_cmd_task(unsigned long arg)
2561 {
2562 struct hci_dev *hdev = (struct hci_dev *) arg;
2563 struct sk_buff *skb;
2564
2565 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2566
2567 /* Send queued commands */
2568 if (atomic_read(&hdev->cmd_cnt)) {
2569 skb = skb_dequeue(&hdev->cmd_q);
2570 if (!skb)
2571 return;
2572
2573 kfree_skb(hdev->sent_cmd);
2574
2575 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2576 if (hdev->sent_cmd) {
2577 atomic_dec(&hdev->cmd_cnt);
2578 hci_send_frame(skb);
2579 if (test_bit(HCI_RESET, &hdev->flags))
2580 del_timer(&hdev->cmd_timer);
2581 else
2582 mod_timer(&hdev->cmd_timer,
2583 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2584 } else {
2585 skb_queue_head(&hdev->cmd_q, skb);
2586 tasklet_schedule(&hdev->cmd_task);
2587 }
2588 }
2589 }
2590
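/* Start a general inquiry (GIAC) of the given length, unless one is
 * already in progress */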
2591 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2592 {
2593 /* General inquiry access code (GIAC) */
2594 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2595 struct hci_cp_inquiry cp;
2596
2597 BT_DBG("%s", hdev->name);
2598
2599 if (test_bit(HCI_INQUIRY, &hdev->flags))
2600 return -EINPROGRESS;
2601
2602 memset(&cp, 0, sizeof(cp));
2603 memcpy(&cp.lap, lap, sizeof(cp.lap));
2604 cp.length = length;
2605
2606 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2607 }
2608
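/* Cancel a running inquiry; -EPERM if none is in progress */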
2609 int hci_cancel_inquiry(struct hci_dev *hdev)
2610 {
2611 BT_DBG("%s", hdev->name);
2612
2613 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2614 return -EPERM;
2615
2616 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2617 }