Bluetooth: Add add/remove_remote_oob_data management commands
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
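
/* Illustrative sketch (not part of the original file): how a caller drives
 * the synchronous request machinery above. hci_request() runs the supplied
 * callback under the request lock and sleeps until hci_req_complete() is
 * invoked from the event path, or the timeout expires. SCAN_* constants
 * are assumed from hci.h; the helper name is hypothetical. */
#if 0
static int example_enable_scans(struct hci_dev *hdev)
{
	/* Enable page and inquiry scan and wait for command completion */
	return hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
}
#endif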

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
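
/* Illustrative userspace sketch (assumed API from bluetooth.h/hci.h, shown
 * only for context): issuing the HCIINQUIRY ioctl that hci_inquiry() above
 * services. The LAP below is the General Inquiry Access Code 0x9e8b33;
 * field values are examples. */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} buf = { { 0 } };
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	buf.ir.dev_id  = 0;			/* hci0 */
	buf.ir.flags   = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	buf.ir.lap[0]  = 0x33;
	buf.ir.lap[1]  = 0x8b;
	buf.ir.lap[2]  = 0x9e;
	buf.ir.length  = 8;			/* 8 * 1.28 s */
	buf.ir.num_rsp = 8;

	if (ioctl(dd, HCIINQUIRY, (unsigned long) &buf) < 0)
		perror("HCIINQUIRY");
#endif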

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
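
/* Illustrative sketch: the event handler is expected to feed the link key
 * store on a Link Key Notification event roughly as follows (handler name
 * is hypothetical; cf. hci_event.c). */
#if 0
static void example_link_key_notify(struct hci_dev *hdev,
					struct hci_ev_link_key_notify *ev)
{
	/* new_key != 0 so mgmt_new_key() announces the key to userspace */
	hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key, ev->key_type, 0);
}
#endif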

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
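
/* Illustrative sketch: the new add/remove_remote_oob_data management
 * commands from this change are expected to reach the helpers above roughly
 * like this (struct name and parameter unpacking assumed from mgmt.c). */
#if 0
	struct mgmt_cp_add_remote_oob_data *cp = data;

	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
							cp->randomizer);

	/* and, for the remove command: */
	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
#endif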

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
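
/* Illustrative driver-side sketch: the minimal contract hci_register_dev()
 * enforces (open, close, destruct) plus the send handler used by
 * hci_send_frame(). Callback signatures are assumed for this kernel
 * version; all names are hypothetical. */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }
static int example_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }
static void example_destruct(struct hci_dev *hdev) { }

static int example_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_VIRTUAL;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	return hci_register_dev(hdev);	/* returns the allocated id */
}
#endif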

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
					int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
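
/* Illustrative sketch: a driver with a framed transport pushes received
 * bytes through hci_recv_fragment(); the return value is how many input
 * bytes remain unconsumed for the current packet. The buffer and length
 * below are hypothetical driver state. */
#if 0
	/* e.g. from a transport driver's receive path */
	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
#endif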

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
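
/* Illustrative sketch: hci_event.c pairs a Command Complete event back to
 * the parameters of the command that produced it, e.g. for Write Scan
 * Enable (a single byte, as queued by hci_scan_req() above). */
#if 0
	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (sent) {
		__u8 param = *((__u8 *) sent);
		BT_DBG("scan enable param 0x%2.2x", param);
	}
#endif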

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
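
/* Illustrative sketch: an upper layer (L2CAP) submits a PDU with any
 * fragments chained on skb_shinfo(skb)->frag_list; continuations are
 * re-flagged ACL_CONT by the loop above. */
#if 0
	hci_send_acl(conn, skb, ACL_START);
#endif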

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
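
/* Worked example of the quota math above: with hdev->acl_cnt == 8 and
 * three ACL links holding queued data, the least-served connection is
 * picked and granted q = 8 / 3 = 2 packets this pass; when q would round
 * down to 0 the quote is forced to 1, so a busy link still makes
 * progress. */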

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}