Bluetooth: Add special handling with __hci_request and HCI_INIT
net/bluetooth/hci_core.c (mt8127/android_kernel_alcatel_ttab.git)
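The "special handling" in the subject is visible in the file below: while HCI_INIT is set, hci_send_cmd() records the opcode of each queued command in hdev->init_last_cmd, and hci_req_complete() only finishes the pending request when the completed opcode matches that last recorded command. This lets hci_init_req() queue the whole batch of mandatory and optional init commands under a single __hci_request() call, with the waiter woken only when the final init command completes (or the timeout fires).

A minimal sketch of the calling pattern, as used by hci_dev_open() below (identifiers are the ones from this file, not a new API):

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
	hdev->init_last_cmd = 0;

	ret = __hci_request(hdev, hci_init_req, 0,
			    msecs_to_jiffies(HCI_INIT_TIMEOUT));

	clear_bit(HCI_INIT, &hdev->flags);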
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
45
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52
53 #define AUTO_OFF_TIMEOUT 2000
54
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO 2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
79 int hci_register_notifier(struct notifier_block *nb)
80 {
81 return atomic_notifier_chain_register(&hci_notifier, nb);
82 }
83
84 int hci_unregister_notifier(struct notifier_block *nb)
85 {
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 }
88
89 static void hci_notify(struct hci_dev *hdev, int event)
90 {
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 }
93
94 /* ---- HCI requests ---- */
95
96 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
97 {
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104 return;
105
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
110 }
111 }
112
113 static void hci_req_cancel(struct hci_dev *hdev, int err)
114 {
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122 }
123
124 /* Execute request and wait for completion. */
125 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
126 unsigned long opt, __u32 timeout)
127 {
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
158 }
159
160 hdev->req_status = hdev->req_result = 0;
161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165 }
166
167 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168 unsigned long opt, __u32 timeout)
169 {
170 int ret;
171
172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181 }
182
183 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
184 {
185 BT_DBG("%s %ld", hdev->name, opt);
186
187 /* Reset device */
188 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
189 }
190
191 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
192 {
193 struct sk_buff *skb;
194 __le16 param;
195 __u8 flt_type;
196
197 BT_DBG("%s %ld", hdev->name, opt);
198
199 /* Driver initialization */
200
201 /* Special commands */
202 while ((skb = skb_dequeue(&hdev->driver_init))) {
203 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
204 skb->dev = (void *) hdev;
205
206 skb_queue_tail(&hdev->cmd_q, skb);
207 tasklet_schedule(&hdev->cmd_task);
208 }
209 skb_queue_purge(&hdev->driver_init);
210
211 /* Mandatory initialization */
212
213 /* Reset */
214 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
215 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
216
217 /* Read Local Supported Features */
218 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
219
220 /* Read Local Version */
221 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
222
223 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
224 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
225
226 #if 0
227 /* Host buffer size */
228 {
229 struct hci_cp_host_buffer_size cp;
230 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
231 cp.sco_mtu = HCI_MAX_SCO_SIZE;
232 cp.acl_max_pkt = cpu_to_le16(0xffff);
233 cp.sco_max_pkt = cpu_to_le16(0xffff);
234 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
235 }
236 #endif
237
238 /* Read BD Address */
239 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
240
241 /* Read Class of Device */
242 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
243
244 /* Read Local Name */
245 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
246
247 /* Read Voice Setting */
248 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
249
250 /* Optional initialization */
251
252 /* Clear Event Filters */
253 flt_type = HCI_FLT_CLEAR_ALL;
254 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
255
256 /* Page timeout ~20 secs */
257 param = cpu_to_le16(0x8000);
258 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
259
260 /* Connection accept timeout ~20 secs */
261 param = cpu_to_le16(0x7d00);
262 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
263 }
264
265 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
266 {
267 __u8 scan = opt;
268
269 BT_DBG("%s %x", hdev->name, scan);
270
271 /* Inquiry and Page scans */
272 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
273 }
274
275 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
276 {
277 __u8 auth = opt;
278
279 BT_DBG("%s %x", hdev->name, auth);
280
281 /* Authentication */
282 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
283 }
284
285 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
286 {
287 __u8 encrypt = opt;
288
289 BT_DBG("%s %x", hdev->name, encrypt);
290
291 /* Encryption */
292 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
293 }
294
295 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 __le16 policy = cpu_to_le16(opt);
298
299 BT_DBG("%s %x", hdev->name, policy);
300
301 /* Default link policy */
302 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
303 }
304
305 /* Get HCI device by index.
306 * Device is held on return. */
307 struct hci_dev *hci_dev_get(int index)
308 {
309 struct hci_dev *hdev = NULL;
310 struct list_head *p;
311
312 BT_DBG("%d", index);
313
314 if (index < 0)
315 return NULL;
316
317 read_lock(&hci_dev_list_lock);
318 list_for_each(p, &hci_dev_list) {
319 struct hci_dev *d = list_entry(p, struct hci_dev, list);
320 if (d->id == index) {
321 hdev = hci_dev_hold(d);
322 break;
323 }
324 }
325 read_unlock(&hci_dev_list_lock);
326 return hdev;
327 }
328
329 /* ---- Inquiry support ---- */
330 static void inquiry_cache_flush(struct hci_dev *hdev)
331 {
332 struct inquiry_cache *cache = &hdev->inq_cache;
333 struct inquiry_entry *next = cache->list, *e;
334
335 BT_DBG("cache %p", cache);
336
337 cache->list = NULL;
338 while ((e = next)) {
339 next = e->next;
340 kfree(e);
341 }
342 }
343
344 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
345 {
346 struct inquiry_cache *cache = &hdev->inq_cache;
347 struct inquiry_entry *e;
348
349 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
350
351 for (e = cache->list; e; e = e->next)
352 if (!bacmp(&e->data.bdaddr, bdaddr))
353 break;
354 return e;
355 }
356
357 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
358 {
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *ie;
361
362 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
363
364 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
365 if (!ie) {
366 /* Entry not in the cache. Add new one. */
367 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
368 if (!ie)
369 return;
370
371 ie->next = cache->list;
372 cache->list = ie;
373 }
374
375 memcpy(&ie->data, data, sizeof(*data));
376 ie->timestamp = jiffies;
377 cache->timestamp = jiffies;
378 }
379
380 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
381 {
382 struct inquiry_cache *cache = &hdev->inq_cache;
383 struct inquiry_info *info = (struct inquiry_info *) buf;
384 struct inquiry_entry *e;
385 int copied = 0;
386
387 for (e = cache->list; e && copied < num; e = e->next, copied++) {
388 struct inquiry_data *data = &e->data;
389 bacpy(&info->bdaddr, &data->bdaddr);
390 info->pscan_rep_mode = data->pscan_rep_mode;
391 info->pscan_period_mode = data->pscan_period_mode;
392 info->pscan_mode = data->pscan_mode;
393 memcpy(info->dev_class, data->dev_class, 3);
394 info->clock_offset = data->clock_offset;
395 info++;
396 }
397
398 BT_DBG("cache %p, copied %d", cache, copied);
399 return copied;
400 }
401
402 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
403 {
404 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
405 struct hci_cp_inquiry cp;
406
407 BT_DBG("%s", hdev->name);
408
409 if (test_bit(HCI_INQUIRY, &hdev->flags))
410 return;
411
412 /* Start Inquiry */
413 memcpy(&cp.lap, &ir->lap, 3);
414 cp.length = ir->length;
415 cp.num_rsp = ir->num_rsp;
416 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
417 }
418
419 int hci_inquiry(void __user *arg)
420 {
421 __u8 __user *ptr = arg;
422 struct hci_inquiry_req ir;
423 struct hci_dev *hdev;
424 int err = 0, do_inquiry = 0, max_rsp;
425 long timeo;
426 __u8 *buf;
427
428 if (copy_from_user(&ir, ptr, sizeof(ir)))
429 return -EFAULT;
430
431 if (!(hdev = hci_dev_get(ir.dev_id)))
432 return -ENODEV;
433
434 hci_dev_lock_bh(hdev);
435 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
436 inquiry_cache_empty(hdev) ||
437 ir.flags & IREQ_CACHE_FLUSH) {
438 inquiry_cache_flush(hdev);
439 do_inquiry = 1;
440 }
441 hci_dev_unlock_bh(hdev);
442
443 timeo = ir.length * msecs_to_jiffies(2000);
444
445 if (do_inquiry) {
446 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
447 if (err < 0)
448 goto done;
449 }
450
451 	/* for an unlimited number of responses we use a buffer with 255 entries */
452 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
453
454 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
455 	 * copy it to user space.
456 */
457 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
458 if (!buf) {
459 err = -ENOMEM;
460 goto done;
461 }
462
463 hci_dev_lock_bh(hdev);
464 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
465 hci_dev_unlock_bh(hdev);
466
467 BT_DBG("num_rsp %d", ir.num_rsp);
468
469 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
470 ptr += sizeof(ir);
471 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
472 ir.num_rsp))
473 err = -EFAULT;
474 } else
475 err = -EFAULT;
476
477 kfree(buf);
478
479 done:
480 hci_dev_put(hdev);
481 return err;
482 }
483
484 /* ---- HCI ioctl helpers ---- */
485
486 int hci_dev_open(__u16 dev)
487 {
488 struct hci_dev *hdev;
489 int ret = 0;
490
491 if (!(hdev = hci_dev_get(dev)))
492 return -ENODEV;
493
494 BT_DBG("%s %p", hdev->name, hdev);
495
496 hci_req_lock(hdev);
497
498 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
499 ret = -ERFKILL;
500 goto done;
501 }
502
503 if (test_bit(HCI_UP, &hdev->flags)) {
504 ret = -EALREADY;
505 goto done;
506 }
507
508 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
509 set_bit(HCI_RAW, &hdev->flags);
510
511 	/* Treat all non-BR/EDR controllers as raw devices for now */
512 if (hdev->dev_type != HCI_BREDR)
513 set_bit(HCI_RAW, &hdev->flags);
514
515 if (hdev->open(hdev)) {
516 ret = -EIO;
517 goto done;
518 }
519
520 if (!test_bit(HCI_RAW, &hdev->flags)) {
521 atomic_set(&hdev->cmd_cnt, 1);
522 set_bit(HCI_INIT, &hdev->flags);
523 hdev->init_last_cmd = 0;
524
525 //__hci_request(hdev, hci_reset_req, 0, HZ);
526 ret = __hci_request(hdev, hci_init_req, 0,
527 msecs_to_jiffies(HCI_INIT_TIMEOUT));
528
529 clear_bit(HCI_INIT, &hdev->flags);
530 }
531
532 if (!ret) {
533 hci_dev_hold(hdev);
534 set_bit(HCI_UP, &hdev->flags);
535 hci_notify(hdev, HCI_DEV_UP);
536 if (!test_bit(HCI_SETUP, &hdev->flags))
537 mgmt_powered(hdev->id, 1);
538 } else {
539 /* Init failed, cleanup */
540 tasklet_kill(&hdev->rx_task);
541 tasklet_kill(&hdev->tx_task);
542 tasklet_kill(&hdev->cmd_task);
543
544 skb_queue_purge(&hdev->cmd_q);
545 skb_queue_purge(&hdev->rx_q);
546
547 if (hdev->flush)
548 hdev->flush(hdev);
549
550 if (hdev->sent_cmd) {
551 kfree_skb(hdev->sent_cmd);
552 hdev->sent_cmd = NULL;
553 }
554
555 hdev->close(hdev);
556 hdev->flags = 0;
557 }
558
559 done:
560 hci_req_unlock(hdev);
561 hci_dev_put(hdev);
562 return ret;
563 }
564
565 static int hci_dev_do_close(struct hci_dev *hdev)
566 {
567 BT_DBG("%s %p", hdev->name, hdev);
568
569 hci_req_cancel(hdev, ENODEV);
570 hci_req_lock(hdev);
571
572 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
573 hci_req_unlock(hdev);
574 return 0;
575 }
576
577 /* Kill RX and TX tasks */
578 tasklet_kill(&hdev->rx_task);
579 tasklet_kill(&hdev->tx_task);
580
581 hci_dev_lock_bh(hdev);
582 inquiry_cache_flush(hdev);
583 hci_conn_hash_flush(hdev);
584 hci_dev_unlock_bh(hdev);
585
586 hci_notify(hdev, HCI_DEV_DOWN);
587
588 if (hdev->flush)
589 hdev->flush(hdev);
590
591 /* Reset device */
592 skb_queue_purge(&hdev->cmd_q);
593 atomic_set(&hdev->cmd_cnt, 1);
594 if (!test_bit(HCI_RAW, &hdev->flags)) {
595 set_bit(HCI_INIT, &hdev->flags);
596 __hci_request(hdev, hci_reset_req, 0,
597 msecs_to_jiffies(250));
598 clear_bit(HCI_INIT, &hdev->flags);
599 }
600
601 /* Kill cmd task */
602 tasklet_kill(&hdev->cmd_task);
603
604 /* Drop queues */
605 skb_queue_purge(&hdev->rx_q);
606 skb_queue_purge(&hdev->cmd_q);
607 skb_queue_purge(&hdev->raw_q);
608
609 /* Drop last sent command */
610 if (hdev->sent_cmd) {
611 kfree_skb(hdev->sent_cmd);
612 hdev->sent_cmd = NULL;
613 }
614
615 /* After this point our queues are empty
616 * and no tasks are scheduled. */
617 hdev->close(hdev);
618
619 mgmt_powered(hdev->id, 0);
620
621 /* Clear flags */
622 hdev->flags = 0;
623
624 hci_req_unlock(hdev);
625
626 hci_dev_put(hdev);
627 return 0;
628 }
629
630 int hci_dev_close(__u16 dev)
631 {
632 struct hci_dev *hdev;
633 int err;
634
635 hdev = hci_dev_get(dev);
636 if (!hdev)
637 return -ENODEV;
638 err = hci_dev_do_close(hdev);
639 hci_dev_put(hdev);
640 return err;
641 }
642
643 int hci_dev_reset(__u16 dev)
644 {
645 struct hci_dev *hdev;
646 int ret = 0;
647
648 hdev = hci_dev_get(dev);
649 if (!hdev)
650 return -ENODEV;
651
652 hci_req_lock(hdev);
653 tasklet_disable(&hdev->tx_task);
654
655 if (!test_bit(HCI_UP, &hdev->flags))
656 goto done;
657
658 /* Drop queues */
659 skb_queue_purge(&hdev->rx_q);
660 skb_queue_purge(&hdev->cmd_q);
661
662 hci_dev_lock_bh(hdev);
663 inquiry_cache_flush(hdev);
664 hci_conn_hash_flush(hdev);
665 hci_dev_unlock_bh(hdev);
666
667 if (hdev->flush)
668 hdev->flush(hdev);
669
670 atomic_set(&hdev->cmd_cnt, 1);
671 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
672
673 if (!test_bit(HCI_RAW, &hdev->flags))
674 ret = __hci_request(hdev, hci_reset_req, 0,
675 msecs_to_jiffies(HCI_INIT_TIMEOUT));
676
677 done:
678 tasklet_enable(&hdev->tx_task);
679 hci_req_unlock(hdev);
680 hci_dev_put(hdev);
681 return ret;
682 }
683
684 int hci_dev_reset_stat(__u16 dev)
685 {
686 struct hci_dev *hdev;
687 int ret = 0;
688
689 hdev = hci_dev_get(dev);
690 if (!hdev)
691 return -ENODEV;
692
693 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
694
695 hci_dev_put(hdev);
696
697 return ret;
698 }
699
700 int hci_dev_cmd(unsigned int cmd, void __user *arg)
701 {
702 struct hci_dev *hdev;
703 struct hci_dev_req dr;
704 int err = 0;
705
706 if (copy_from_user(&dr, arg, sizeof(dr)))
707 return -EFAULT;
708
709 hdev = hci_dev_get(dr.dev_id);
710 if (!hdev)
711 return -ENODEV;
712
713 switch (cmd) {
714 case HCISETAUTH:
715 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
716 msecs_to_jiffies(HCI_INIT_TIMEOUT));
717 break;
718
719 case HCISETENCRYPT:
720 if (!lmp_encrypt_capable(hdev)) {
721 err = -EOPNOTSUPP;
722 break;
723 }
724
725 if (!test_bit(HCI_AUTH, &hdev->flags)) {
726 /* Auth must be enabled first */
727 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
728 msecs_to_jiffies(HCI_INIT_TIMEOUT));
729 if (err)
730 break;
731 }
732
733 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
734 msecs_to_jiffies(HCI_INIT_TIMEOUT));
735 break;
736
737 case HCISETSCAN:
738 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
739 msecs_to_jiffies(HCI_INIT_TIMEOUT));
740 break;
741
742 case HCISETLINKPOL:
743 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
744 msecs_to_jiffies(HCI_INIT_TIMEOUT));
745 break;
746
747 case HCISETLINKMODE:
748 hdev->link_mode = ((__u16) dr.dev_opt) &
749 (HCI_LM_MASTER | HCI_LM_ACCEPT);
750 break;
751
752 case HCISETPTYPE:
753 hdev->pkt_type = (__u16) dr.dev_opt;
754 break;
755
756 case HCISETACLMTU:
757 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
758 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
759 break;
760
761 case HCISETSCOMTU:
762 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
763 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
764 break;
765
766 default:
767 err = -EINVAL;
768 break;
769 }
770
771 hci_dev_put(hdev);
772 return err;
773 }
774
775 int hci_get_dev_list(void __user *arg)
776 {
777 struct hci_dev_list_req *dl;
778 struct hci_dev_req *dr;
779 struct list_head *p;
780 int n = 0, size, err;
781 __u16 dev_num;
782
783 if (get_user(dev_num, (__u16 __user *) arg))
784 return -EFAULT;
785
786 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
787 return -EINVAL;
788
789 size = sizeof(*dl) + dev_num * sizeof(*dr);
790
791 dl = kzalloc(size, GFP_KERNEL);
792 if (!dl)
793 return -ENOMEM;
794
795 dr = dl->dev_req;
796
797 read_lock_bh(&hci_dev_list_lock);
798 list_for_each(p, &hci_dev_list) {
799 struct hci_dev *hdev;
800
801 hdev = list_entry(p, struct hci_dev, list);
802
803 hci_del_off_timer(hdev);
804
805 if (!test_bit(HCI_MGMT, &hdev->flags))
806 set_bit(HCI_PAIRABLE, &hdev->flags);
807
808 (dr + n)->dev_id = hdev->id;
809 (dr + n)->dev_opt = hdev->flags;
810
811 if (++n >= dev_num)
812 break;
813 }
814 read_unlock_bh(&hci_dev_list_lock);
815
816 dl->dev_num = n;
817 size = sizeof(*dl) + n * sizeof(*dr);
818
819 err = copy_to_user(arg, dl, size);
820 kfree(dl);
821
822 return err ? -EFAULT : 0;
823 }
824
825 int hci_get_dev_info(void __user *arg)
826 {
827 struct hci_dev *hdev;
828 struct hci_dev_info di;
829 int err = 0;
830
831 if (copy_from_user(&di, arg, sizeof(di)))
832 return -EFAULT;
833
834 hdev = hci_dev_get(di.dev_id);
835 if (!hdev)
836 return -ENODEV;
837
838 hci_del_off_timer(hdev);
839
840 if (!test_bit(HCI_MGMT, &hdev->flags))
841 set_bit(HCI_PAIRABLE, &hdev->flags);
842
843 strcpy(di.name, hdev->name);
844 di.bdaddr = hdev->bdaddr;
845 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
846 di.flags = hdev->flags;
847 di.pkt_type = hdev->pkt_type;
848 di.acl_mtu = hdev->acl_mtu;
849 di.acl_pkts = hdev->acl_pkts;
850 di.sco_mtu = hdev->sco_mtu;
851 di.sco_pkts = hdev->sco_pkts;
852 di.link_policy = hdev->link_policy;
853 di.link_mode = hdev->link_mode;
854
855 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
856 memcpy(&di.features, &hdev->features, sizeof(di.features));
857
858 if (copy_to_user(arg, &di, sizeof(di)))
859 err = -EFAULT;
860
861 hci_dev_put(hdev);
862
863 return err;
864 }
865
866 /* ---- Interface to HCI drivers ---- */
867
868 static int hci_rfkill_set_block(void *data, bool blocked)
869 {
870 struct hci_dev *hdev = data;
871
872 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
873
874 if (!blocked)
875 return 0;
876
877 hci_dev_do_close(hdev);
878
879 return 0;
880 }
881
882 static const struct rfkill_ops hci_rfkill_ops = {
883 .set_block = hci_rfkill_set_block,
884 };
885
886 /* Alloc HCI device */
887 struct hci_dev *hci_alloc_dev(void)
888 {
889 struct hci_dev *hdev;
890
891 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
892 if (!hdev)
893 return NULL;
894
895 skb_queue_head_init(&hdev->driver_init);
896
897 return hdev;
898 }
899 EXPORT_SYMBOL(hci_alloc_dev);
900
901 /* Free HCI device */
902 void hci_free_dev(struct hci_dev *hdev)
903 {
904 skb_queue_purge(&hdev->driver_init);
905
906 /* will free via device release */
907 put_device(&hdev->dev);
908 }
909 EXPORT_SYMBOL(hci_free_dev);
910
911 static void hci_power_on(struct work_struct *work)
912 {
913 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
914
915 BT_DBG("%s", hdev->name);
916
917 if (hci_dev_open(hdev->id) < 0)
918 return;
919
920 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
921 mod_timer(&hdev->off_timer,
922 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
923
924 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
925 mgmt_index_added(hdev->id);
926 }
927
928 static void hci_power_off(struct work_struct *work)
929 {
930 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
931
932 BT_DBG("%s", hdev->name);
933
934 hci_dev_close(hdev->id);
935 }
936
937 static void hci_auto_off(unsigned long data)
938 {
939 struct hci_dev *hdev = (struct hci_dev *) data;
940
941 BT_DBG("%s", hdev->name);
942
943 clear_bit(HCI_AUTO_OFF, &hdev->flags);
944
945 queue_work(hdev->workqueue, &hdev->power_off);
946 }
947
948 void hci_del_off_timer(struct hci_dev *hdev)
949 {
950 BT_DBG("%s", hdev->name);
951
952 clear_bit(HCI_AUTO_OFF, &hdev->flags);
953 del_timer(&hdev->off_timer);
954 }
955
956 int hci_uuids_clear(struct hci_dev *hdev)
957 {
958 struct list_head *p, *n;
959
960 list_for_each_safe(p, n, &hdev->uuids) {
961 struct bt_uuid *uuid;
962
963 uuid = list_entry(p, struct bt_uuid, list);
964
965 list_del(p);
966 kfree(uuid);
967 }
968
969 return 0;
970 }
971
972 /* Register HCI device */
973 int hci_register_dev(struct hci_dev *hdev)
974 {
975 struct list_head *head = &hci_dev_list, *p;
976 int i, id = 0;
977
978 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
979 hdev->bus, hdev->owner);
980
981 if (!hdev->open || !hdev->close || !hdev->destruct)
982 return -EINVAL;
983
984 write_lock_bh(&hci_dev_list_lock);
985
986 /* Find first available device id */
987 list_for_each(p, &hci_dev_list) {
988 if (list_entry(p, struct hci_dev, list)->id != id)
989 break;
990 head = p; id++;
991 }
992
993 sprintf(hdev->name, "hci%d", id);
994 hdev->id = id;
995 list_add(&hdev->list, head);
996
997 atomic_set(&hdev->refcnt, 1);
998 spin_lock_init(&hdev->lock);
999
1000 hdev->flags = 0;
1001 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1002 hdev->esco_type = (ESCO_HV1);
1003 hdev->link_mode = (HCI_LM_ACCEPT);
1004
1005 hdev->idle_timeout = 0;
1006 hdev->sniff_max_interval = 800;
1007 hdev->sniff_min_interval = 80;
1008
1009 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1010 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1011 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1012
1013 skb_queue_head_init(&hdev->rx_q);
1014 skb_queue_head_init(&hdev->cmd_q);
1015 skb_queue_head_init(&hdev->raw_q);
1016
1017 for (i = 0; i < NUM_REASSEMBLY; i++)
1018 hdev->reassembly[i] = NULL;
1019
1020 init_waitqueue_head(&hdev->req_wait_q);
1021 mutex_init(&hdev->req_lock);
1022
1023 inquiry_cache_init(hdev);
1024
1025 hci_conn_hash_init(hdev);
1026
1027 INIT_LIST_HEAD(&hdev->blacklist);
1028
1029 INIT_LIST_HEAD(&hdev->uuids);
1030
1031 INIT_WORK(&hdev->power_on, hci_power_on);
1032 INIT_WORK(&hdev->power_off, hci_power_off);
1033 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1034
1035 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1036
1037 atomic_set(&hdev->promisc, 0);
1038
1039 write_unlock_bh(&hci_dev_list_lock);
1040
1041 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1042 if (!hdev->workqueue)
1043 goto nomem;
1044
1045 hci_register_sysfs(hdev);
1046
1047 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1048 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1049 if (hdev->rfkill) {
1050 if (rfkill_register(hdev->rfkill) < 0) {
1051 rfkill_destroy(hdev->rfkill);
1052 hdev->rfkill = NULL;
1053 }
1054 }
1055
1056 set_bit(HCI_AUTO_OFF, &hdev->flags);
1057 set_bit(HCI_SETUP, &hdev->flags);
1058 queue_work(hdev->workqueue, &hdev->power_on);
1059
1060 hci_notify(hdev, HCI_DEV_REG);
1061
1062 return id;
1063
1064 nomem:
1065 write_lock_bh(&hci_dev_list_lock);
1066 list_del(&hdev->list);
1067 write_unlock_bh(&hci_dev_list_lock);
1068
1069 return -ENOMEM;
1070 }
1071 EXPORT_SYMBOL(hci_register_dev);
1072
1073 /* Unregister HCI device */
1074 int hci_unregister_dev(struct hci_dev *hdev)
1075 {
1076 int i;
1077
1078 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1079
1080 write_lock_bh(&hci_dev_list_lock);
1081 list_del(&hdev->list);
1082 write_unlock_bh(&hci_dev_list_lock);
1083
1084 hci_dev_do_close(hdev);
1085
1086 for (i = 0; i < NUM_REASSEMBLY; i++)
1087 kfree_skb(hdev->reassembly[i]);
1088
1089 if (!test_bit(HCI_INIT, &hdev->flags) &&
1090 !test_bit(HCI_SETUP, &hdev->flags))
1091 mgmt_index_removed(hdev->id);
1092
1093 hci_notify(hdev, HCI_DEV_UNREG);
1094
1095 if (hdev->rfkill) {
1096 rfkill_unregister(hdev->rfkill);
1097 rfkill_destroy(hdev->rfkill);
1098 }
1099
1100 hci_unregister_sysfs(hdev);
1101
1102 destroy_workqueue(hdev->workqueue);
1103
1104 hci_dev_lock_bh(hdev);
1105 hci_blacklist_clear(hdev);
1106 hci_uuids_clear(hdev);
1107 hci_dev_unlock_bh(hdev);
1108
1109 __hci_dev_put(hdev);
1110
1111 return 0;
1112 }
1113 EXPORT_SYMBOL(hci_unregister_dev);
1114
1115 /* Suspend HCI device */
1116 int hci_suspend_dev(struct hci_dev *hdev)
1117 {
1118 hci_notify(hdev, HCI_DEV_SUSPEND);
1119 return 0;
1120 }
1121 EXPORT_SYMBOL(hci_suspend_dev);
1122
1123 /* Resume HCI device */
1124 int hci_resume_dev(struct hci_dev *hdev)
1125 {
1126 hci_notify(hdev, HCI_DEV_RESUME);
1127 return 0;
1128 }
1129 EXPORT_SYMBOL(hci_resume_dev);
1130
1131 /* Receive frame from HCI drivers */
1132 int hci_recv_frame(struct sk_buff *skb)
1133 {
1134 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1135 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1136 && !test_bit(HCI_INIT, &hdev->flags))) {
1137 kfree_skb(skb);
1138 return -ENXIO;
1139 }
1140
1141 	/* Incoming skb */
1142 bt_cb(skb)->incoming = 1;
1143
1144 /* Time stamp */
1145 __net_timestamp(skb);
1146
1147 /* Queue frame for rx task */
1148 skb_queue_tail(&hdev->rx_q, skb);
1149 tasklet_schedule(&hdev->rx_task);
1150
1151 return 0;
1152 }
1153 EXPORT_SYMBOL(hci_recv_frame);
1154
1155 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1156 int count, __u8 index, gfp_t gfp_mask)
1157 {
1158 int len = 0;
1159 int hlen = 0;
1160 int remain = count;
1161 struct sk_buff *skb;
1162 struct bt_skb_cb *scb;
1163
1164 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1165 index >= NUM_REASSEMBLY)
1166 return -EILSEQ;
1167
1168 skb = hdev->reassembly[index];
1169
1170 if (!skb) {
1171 switch (type) {
1172 case HCI_ACLDATA_PKT:
1173 len = HCI_MAX_FRAME_SIZE;
1174 hlen = HCI_ACL_HDR_SIZE;
1175 break;
1176 case HCI_EVENT_PKT:
1177 len = HCI_MAX_EVENT_SIZE;
1178 hlen = HCI_EVENT_HDR_SIZE;
1179 break;
1180 case HCI_SCODATA_PKT:
1181 len = HCI_MAX_SCO_SIZE;
1182 hlen = HCI_SCO_HDR_SIZE;
1183 break;
1184 }
1185
1186 skb = bt_skb_alloc(len, gfp_mask);
1187 if (!skb)
1188 return -ENOMEM;
1189
1190 scb = (void *) skb->cb;
1191 scb->expect = hlen;
1192 scb->pkt_type = type;
1193
1194 skb->dev = (void *) hdev;
1195 hdev->reassembly[index] = skb;
1196 }
1197
1198 while (count) {
1199 scb = (void *) skb->cb;
1200 len = min(scb->expect, (__u16)count);
1201
1202 memcpy(skb_put(skb, len), data, len);
1203
1204 count -= len;
1205 data += len;
1206 scb->expect -= len;
1207 remain = count;
1208
1209 switch (type) {
1210 case HCI_EVENT_PKT:
1211 if (skb->len == HCI_EVENT_HDR_SIZE) {
1212 struct hci_event_hdr *h = hci_event_hdr(skb);
1213 scb->expect = h->plen;
1214
1215 if (skb_tailroom(skb) < scb->expect) {
1216 kfree_skb(skb);
1217 hdev->reassembly[index] = NULL;
1218 return -ENOMEM;
1219 }
1220 }
1221 break;
1222
1223 case HCI_ACLDATA_PKT:
1224 if (skb->len == HCI_ACL_HDR_SIZE) {
1225 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1226 scb->expect = __le16_to_cpu(h->dlen);
1227
1228 if (skb_tailroom(skb) < scb->expect) {
1229 kfree_skb(skb);
1230 hdev->reassembly[index] = NULL;
1231 return -ENOMEM;
1232 }
1233 }
1234 break;
1235
1236 case HCI_SCODATA_PKT:
1237 if (skb->len == HCI_SCO_HDR_SIZE) {
1238 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1239 scb->expect = h->dlen;
1240
1241 if (skb_tailroom(skb) < scb->expect) {
1242 kfree_skb(skb);
1243 hdev->reassembly[index] = NULL;
1244 return -ENOMEM;
1245 }
1246 }
1247 break;
1248 }
1249
1250 if (scb->expect == 0) {
1251 /* Complete frame */
1252
1253 bt_cb(skb)->pkt_type = type;
1254 hci_recv_frame(skb);
1255
1256 hdev->reassembly[index] = NULL;
1257 return remain;
1258 }
1259 }
1260
1261 return remain;
1262 }
1263
1264 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1265 {
1266 int rem = 0;
1267
1268 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1269 return -EILSEQ;
1270
1271 while (count) {
1272 rem = hci_reassembly(hdev, type, data, count,
1273 type - 1, GFP_ATOMIC);
1274 if (rem < 0)
1275 return rem;
1276
1277 data += (count - rem);
1278 count = rem;
1279 };
1280
1281 return rem;
1282 }
1283 EXPORT_SYMBOL(hci_recv_fragment);
1284
1285 #define STREAM_REASSEMBLY 0
1286
1287 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1288 {
1289 int type;
1290 int rem = 0;
1291
1292 while (count) {
1293 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1294
1295 if (!skb) {
1296 struct { char type; } *pkt;
1297
1298 /* Start of the frame */
1299 pkt = data;
1300 type = pkt->type;
1301
1302 data++;
1303 count--;
1304 } else
1305 type = bt_cb(skb)->pkt_type;
1306
1307 rem = hci_reassembly(hdev, type, data,
1308 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1309 if (rem < 0)
1310 return rem;
1311
1312 data += (count - rem);
1313 count = rem;
1314 };
1315
1316 return rem;
1317 }
1318 EXPORT_SYMBOL(hci_recv_stream_fragment);
1319
1320 /* ---- Interface to upper protocols ---- */
1321
1322 /* Register/Unregister protocols.
1323 * hci_task_lock is used to ensure that no tasks are running. */
1324 int hci_register_proto(struct hci_proto *hp)
1325 {
1326 int err = 0;
1327
1328 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1329
1330 if (hp->id >= HCI_MAX_PROTO)
1331 return -EINVAL;
1332
1333 write_lock_bh(&hci_task_lock);
1334
1335 if (!hci_proto[hp->id])
1336 hci_proto[hp->id] = hp;
1337 else
1338 err = -EEXIST;
1339
1340 write_unlock_bh(&hci_task_lock);
1341
1342 return err;
1343 }
1344 EXPORT_SYMBOL(hci_register_proto);
1345
1346 int hci_unregister_proto(struct hci_proto *hp)
1347 {
1348 int err = 0;
1349
1350 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1351
1352 if (hp->id >= HCI_MAX_PROTO)
1353 return -EINVAL;
1354
1355 write_lock_bh(&hci_task_lock);
1356
1357 if (hci_proto[hp->id])
1358 hci_proto[hp->id] = NULL;
1359 else
1360 err = -ENOENT;
1361
1362 write_unlock_bh(&hci_task_lock);
1363
1364 return err;
1365 }
1366 EXPORT_SYMBOL(hci_unregister_proto);
1367
1368 int hci_register_cb(struct hci_cb *cb)
1369 {
1370 BT_DBG("%p name %s", cb, cb->name);
1371
1372 write_lock_bh(&hci_cb_list_lock);
1373 list_add(&cb->list, &hci_cb_list);
1374 write_unlock_bh(&hci_cb_list_lock);
1375
1376 return 0;
1377 }
1378 EXPORT_SYMBOL(hci_register_cb);
1379
1380 int hci_unregister_cb(struct hci_cb *cb)
1381 {
1382 BT_DBG("%p name %s", cb, cb->name);
1383
1384 write_lock_bh(&hci_cb_list_lock);
1385 list_del(&cb->list);
1386 write_unlock_bh(&hci_cb_list_lock);
1387
1388 return 0;
1389 }
1390 EXPORT_SYMBOL(hci_unregister_cb);
1391
1392 static int hci_send_frame(struct sk_buff *skb)
1393 {
1394 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1395
1396 if (!hdev) {
1397 kfree_skb(skb);
1398 return -ENODEV;
1399 }
1400
1401 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1402
1403 if (atomic_read(&hdev->promisc)) {
1404 /* Time stamp */
1405 __net_timestamp(skb);
1406
1407 hci_send_to_sock(hdev, skb, NULL);
1408 }
1409
1410 /* Get rid of skb owner, prior to sending to the driver. */
1411 skb_orphan(skb);
1412
1413 return hdev->send(skb);
1414 }
1415
1416 /* Send HCI command */
1417 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1418 {
1419 int len = HCI_COMMAND_HDR_SIZE + plen;
1420 struct hci_command_hdr *hdr;
1421 struct sk_buff *skb;
1422
1423 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1424
1425 skb = bt_skb_alloc(len, GFP_ATOMIC);
1426 if (!skb) {
1427 BT_ERR("%s no memory for command", hdev->name);
1428 return -ENOMEM;
1429 }
1430
1431 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1432 hdr->opcode = cpu_to_le16(opcode);
1433 hdr->plen = plen;
1434
1435 if (plen)
1436 memcpy(skb_put(skb, plen), param, plen);
1437
1438 BT_DBG("skb len %d", skb->len);
1439
1440 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1441 skb->dev = (void *) hdev;
1442
1443 if (test_bit(HCI_INIT, &hdev->flags))
1444 hdev->init_last_cmd = opcode;
1445
1446 skb_queue_tail(&hdev->cmd_q, skb);
1447 tasklet_schedule(&hdev->cmd_task);
1448
1449 return 0;
1450 }
1451
1452 /* Get data from the previously sent command */
1453 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1454 {
1455 struct hci_command_hdr *hdr;
1456
1457 if (!hdev->sent_cmd)
1458 return NULL;
1459
1460 hdr = (void *) hdev->sent_cmd->data;
1461
1462 if (hdr->opcode != cpu_to_le16(opcode))
1463 return NULL;
1464
1465 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1466
1467 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1468 }
1469
1470 /* Send ACL data */
1471 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1472 {
1473 struct hci_acl_hdr *hdr;
1474 int len = skb->len;
1475
1476 skb_push(skb, HCI_ACL_HDR_SIZE);
1477 skb_reset_transport_header(skb);
1478 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1479 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1480 hdr->dlen = cpu_to_le16(len);
1481 }
1482
1483 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1484 {
1485 struct hci_dev *hdev = conn->hdev;
1486 struct sk_buff *list;
1487
1488 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1489
1490 skb->dev = (void *) hdev;
1491 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1492 hci_add_acl_hdr(skb, conn->handle, flags);
1493
1494 list = skb_shinfo(skb)->frag_list;
1495 if (!list) {
1496 /* Non fragmented */
1497 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1498
1499 skb_queue_tail(&conn->data_q, skb);
1500 } else {
1501 /* Fragmented */
1502 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1503
1504 skb_shinfo(skb)->frag_list = NULL;
1505
1506 /* Queue all fragments atomically */
1507 spin_lock_bh(&conn->data_q.lock);
1508
1509 __skb_queue_tail(&conn->data_q, skb);
1510
1511 flags &= ~ACL_START;
1512 flags |= ACL_CONT;
1513 do {
1514 skb = list; list = list->next;
1515
1516 skb->dev = (void *) hdev;
1517 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1518 hci_add_acl_hdr(skb, conn->handle, flags);
1519
1520 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1521
1522 __skb_queue_tail(&conn->data_q, skb);
1523 } while (list);
1524
1525 spin_unlock_bh(&conn->data_q.lock);
1526 }
1527
1528 tasklet_schedule(&hdev->tx_task);
1529 }
1530 EXPORT_SYMBOL(hci_send_acl);
1531
1532 /* Send SCO data */
1533 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1534 {
1535 struct hci_dev *hdev = conn->hdev;
1536 struct hci_sco_hdr hdr;
1537
1538 BT_DBG("%s len %d", hdev->name, skb->len);
1539
1540 hdr.handle = cpu_to_le16(conn->handle);
1541 hdr.dlen = skb->len;
1542
1543 skb_push(skb, HCI_SCO_HDR_SIZE);
1544 skb_reset_transport_header(skb);
1545 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1546
1547 skb->dev = (void *) hdev;
1548 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1549
1550 skb_queue_tail(&conn->data_q, skb);
1551 tasklet_schedule(&hdev->tx_task);
1552 }
1553 EXPORT_SYMBOL(hci_send_sco);
1554
1555 /* ---- HCI TX task (outgoing data) ---- */
1556
1557 /* HCI Connection scheduler */
1558 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1559 {
1560 struct hci_conn_hash *h = &hdev->conn_hash;
1561 struct hci_conn *conn = NULL;
1562 int num = 0, min = ~0;
1563 struct list_head *p;
1564
1565 /* We don't have to lock device here. Connections are always
1566 * added and removed with TX task disabled. */
1567 list_for_each(p, &h->list) {
1568 struct hci_conn *c;
1569 c = list_entry(p, struct hci_conn, list);
1570
1571 if (c->type != type || skb_queue_empty(&c->data_q))
1572 continue;
1573
1574 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1575 continue;
1576
1577 num++;
1578
1579 if (c->sent < min) {
1580 min = c->sent;
1581 conn = c;
1582 }
1583 }
1584
1585 if (conn) {
1586 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1587 int q = cnt / num;
1588 *quote = q ? q : 1;
1589 } else
1590 *quote = 0;
1591
1592 BT_DBG("conn %p quote %d", conn, *quote);
1593 return conn;
1594 }
1595
1596 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1597 {
1598 struct hci_conn_hash *h = &hdev->conn_hash;
1599 struct list_head *p;
1600 struct hci_conn *c;
1601
1602 BT_ERR("%s ACL tx timeout", hdev->name);
1603
1604 /* Kill stalled connections */
1605 list_for_each(p, &h->list) {
1606 c = list_entry(p, struct hci_conn, list);
1607 if (c->type == ACL_LINK && c->sent) {
1608 BT_ERR("%s killing stalled ACL connection %s",
1609 hdev->name, batostr(&c->dst));
1610 hci_acl_disconn(c, 0x13);
1611 }
1612 }
1613 }
1614
1615 static inline void hci_sched_acl(struct hci_dev *hdev)
1616 {
1617 struct hci_conn *conn;
1618 struct sk_buff *skb;
1619 int quote;
1620
1621 BT_DBG("%s", hdev->name);
1622
1623 if (!test_bit(HCI_RAW, &hdev->flags)) {
1624 /* ACL tx timeout must be longer than maximum
1625 * link supervision timeout (40.9 seconds) */
1626 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1627 hci_acl_tx_to(hdev);
1628 }
1629
1630 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1631 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1632 BT_DBG("skb %p len %d", skb, skb->len);
1633
1634 hci_conn_enter_active_mode(conn);
1635
1636 hci_send_frame(skb);
1637 hdev->acl_last_tx = jiffies;
1638
1639 hdev->acl_cnt--;
1640 conn->sent++;
1641 }
1642 }
1643 }
1644
1645 /* Schedule SCO */
1646 static inline void hci_sched_sco(struct hci_dev *hdev)
1647 {
1648 struct hci_conn *conn;
1649 struct sk_buff *skb;
1650 int quote;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1655 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1656 BT_DBG("skb %p len %d", skb, skb->len);
1657 hci_send_frame(skb);
1658
1659 conn->sent++;
1660 if (conn->sent == ~0)
1661 conn->sent = 0;
1662 }
1663 }
1664 }
1665
1666 static inline void hci_sched_esco(struct hci_dev *hdev)
1667 {
1668 struct hci_conn *conn;
1669 struct sk_buff *skb;
1670 int quote;
1671
1672 BT_DBG("%s", hdev->name);
1673
1674 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1675 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1676 BT_DBG("skb %p len %d", skb, skb->len);
1677 hci_send_frame(skb);
1678
1679 conn->sent++;
1680 if (conn->sent == ~0)
1681 conn->sent = 0;
1682 }
1683 }
1684 }
1685
1686 static void hci_tx_task(unsigned long arg)
1687 {
1688 struct hci_dev *hdev = (struct hci_dev *) arg;
1689 struct sk_buff *skb;
1690
1691 read_lock(&hci_task_lock);
1692
1693 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1694
1695 /* Schedule queues and send stuff to HCI driver */
1696
1697 hci_sched_acl(hdev);
1698
1699 hci_sched_sco(hdev);
1700
1701 hci_sched_esco(hdev);
1702
1703 /* Send next queued raw (unknown type) packet */
1704 while ((skb = skb_dequeue(&hdev->raw_q)))
1705 hci_send_frame(skb);
1706
1707 read_unlock(&hci_task_lock);
1708 }
1709
1710 /* ----- HCI RX task (incoming data processing) ----- */
1711
1712 /* ACL data packet */
1713 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1714 {
1715 struct hci_acl_hdr *hdr = (void *) skb->data;
1716 struct hci_conn *conn;
1717 __u16 handle, flags;
1718
1719 skb_pull(skb, HCI_ACL_HDR_SIZE);
1720
1721 handle = __le16_to_cpu(hdr->handle);
1722 flags = hci_flags(handle);
1723 handle = hci_handle(handle);
1724
1725 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1726
1727 hdev->stat.acl_rx++;
1728
1729 hci_dev_lock(hdev);
1730 conn = hci_conn_hash_lookup_handle(hdev, handle);
1731 hci_dev_unlock(hdev);
1732
1733 if (conn) {
1734 register struct hci_proto *hp;
1735
1736 hci_conn_enter_active_mode(conn);
1737
1738 /* Send to upper protocol */
1739 hp = hci_proto[HCI_PROTO_L2CAP];
1740 if (hp && hp->recv_acldata) {
1741 hp->recv_acldata(conn, skb, flags);
1742 return;
1743 }
1744 } else {
1745 BT_ERR("%s ACL packet for unknown connection handle %d",
1746 hdev->name, handle);
1747 }
1748
1749 kfree_skb(skb);
1750 }
1751
1752 /* SCO data packet */
1753 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1754 {
1755 struct hci_sco_hdr *hdr = (void *) skb->data;
1756 struct hci_conn *conn;
1757 __u16 handle;
1758
1759 skb_pull(skb, HCI_SCO_HDR_SIZE);
1760
1761 handle = __le16_to_cpu(hdr->handle);
1762
1763 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1764
1765 hdev->stat.sco_rx++;
1766
1767 hci_dev_lock(hdev);
1768 conn = hci_conn_hash_lookup_handle(hdev, handle);
1769 hci_dev_unlock(hdev);
1770
1771 if (conn) {
1772 register struct hci_proto *hp;
1773
1774 /* Send to upper protocol */
1775 hp = hci_proto[HCI_PROTO_SCO];
1776 if (hp && hp->recv_scodata) {
1777 hp->recv_scodata(conn, skb);
1778 return;
1779 }
1780 } else {
1781 BT_ERR("%s SCO packet for unknown connection handle %d",
1782 hdev->name, handle);
1783 }
1784
1785 kfree_skb(skb);
1786 }
1787
1788 static void hci_rx_task(unsigned long arg)
1789 {
1790 struct hci_dev *hdev = (struct hci_dev *) arg;
1791 struct sk_buff *skb;
1792
1793 BT_DBG("%s", hdev->name);
1794
1795 read_lock(&hci_task_lock);
1796
1797 while ((skb = skb_dequeue(&hdev->rx_q))) {
1798 if (atomic_read(&hdev->promisc)) {
1799 /* Send copy to the sockets */
1800 hci_send_to_sock(hdev, skb, NULL);
1801 }
1802
1803 if (test_bit(HCI_RAW, &hdev->flags)) {
1804 kfree_skb(skb);
1805 continue;
1806 }
1807
1808 if (test_bit(HCI_INIT, &hdev->flags)) {
1809 			/* Don't process data packets in this state. */
1810 switch (bt_cb(skb)->pkt_type) {
1811 case HCI_ACLDATA_PKT:
1812 case HCI_SCODATA_PKT:
1813 kfree_skb(skb);
1814 continue;
1815 }
1816 }
1817
1818 /* Process frame */
1819 switch (bt_cb(skb)->pkt_type) {
1820 case HCI_EVENT_PKT:
1821 hci_event_packet(hdev, skb);
1822 break;
1823
1824 case HCI_ACLDATA_PKT:
1825 BT_DBG("%s ACL data packet", hdev->name);
1826 hci_acldata_packet(hdev, skb);
1827 break;
1828
1829 case HCI_SCODATA_PKT:
1830 BT_DBG("%s SCO data packet", hdev->name);
1831 hci_scodata_packet(hdev, skb);
1832 break;
1833
1834 default:
1835 kfree_skb(skb);
1836 break;
1837 }
1838 }
1839
1840 read_unlock(&hci_task_lock);
1841 }
1842
1843 static void hci_cmd_task(unsigned long arg)
1844 {
1845 struct hci_dev *hdev = (struct hci_dev *) arg;
1846 struct sk_buff *skb;
1847
1848 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1849
1850 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1851 BT_ERR("%s command tx timeout", hdev->name);
1852 atomic_set(&hdev->cmd_cnt, 1);
1853 }
1854
1855 /* Send queued commands */
1856 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1857 kfree_skb(hdev->sent_cmd);
1858
1859 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1860 if (hdev->sent_cmd) {
1861 atomic_dec(&hdev->cmd_cnt);
1862 hci_send_frame(skb);
1863 hdev->cmd_last_tx = jiffies;
1864 } else {
1865 skb_queue_head(&hdev->cmd_q, skb);
1866 tasklet_schedule(&hdev->cmd_task);
1867 }
1868 }
1869 }