Bluetooth: Implement set_pairable management command
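For context: the Set Pairable handler named in the subject lives in net/bluetooth/mgmt.c (not shown here); this file only consults the resulting HCI_PAIRABLE flag, which hci_get_dev_list() and hci_get_dev_info() below force on whenever the HCI_MGMT flag is not set, so legacy (non-mgmt) userspace keeps the old always-pairable behaviour. A minimal userspace sketch of driving such a management command over the HCI control channel is included here purely for illustration; the opcode value, header layout, parameter struct and channel/index constants are assumptions, not definitions taken from this tree, so check net/bluetooth/mgmt.h and include/net/bluetooth/hci.h for the real ones.

/*
 * set_pairable_sketch.c - hypothetical example, NOT part of this commit.
 * Sends an assumed Set Pairable management command on the HCI control
 * channel and lets the kernel toggle HCI_PAIRABLE for controller 0.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#define AF_BLUETOOTH		31	/* stable socket family number */
#define BTPROTO_HCI		1
#define HCI_CHANNEL_CONTROL	3	/* assumed id of the management channel */
#define HCI_DEV_NONE		0xffff	/* control channel binds to no specific device */

#define MGMT_OP_SET_PAIRABLE	0x000b	/* assumed opcode; see net/bluetooth/mgmt.h */

struct sockaddr_hci {			/* mirrors the kernel's sockaddr_hci */
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

struct mgmt_hdr {			/* assumed management frame header */
	uint16_t	opcode;
	uint16_t	len;
} __attribute__((packed));

struct mgmt_cp_set_pairable {		/* assumed command parameters */
	uint16_t	index;		/* controller index, e.g. 0 for hci0 */
	uint8_t		pairable;	/* 1 = pairable, 0 = not pairable */
} __attribute__((packed));

int main(void)
{
	struct {
		struct mgmt_hdr hdr;
		struct mgmt_cp_set_pairable cp;
	} __attribute__((packed)) req;
	struct sockaddr_hci addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.hci_family  = AF_BLUETOOTH;
	addr.hci_dev     = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;
	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		perror("bind");
		close(sk);
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.hdr.opcode  = htole16(MGMT_OP_SET_PAIRABLE);
	req.hdr.len     = htole16(sizeof(req.cp));
	req.cp.index    = htole16(0);		/* hci0 */
	req.cp.pairable = 1;

	if (write(sk, &req, sizeof(req)) < 0)
		perror("write");

	close(sk);
	return 0;
}

If the assumed layout matches this tree's mgmt.h, the kernel answers with a command-complete event on the same socket and sets or clears HCI_PAIRABLE on the addressed hdev.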
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
27#include <linux/jiffies.h>
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
40#include <linux/workqueue.h>
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
43#include <linux/rfkill.h>
44#include <net/sock.h>
45
46#include <asm/system.h>
47#include <linux/uaccess.h>
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
53#define AUTO_OFF_TIMEOUT 2000
54
55static void hci_cmd_task(unsigned long arg);
56static void hci_rx_task(unsigned long arg);
57static void hci_tx_task(unsigned long arg);
58static void hci_notify(struct hci_dev *hdev, int event);
59
60static DEFINE_RWLOCK(hci_task_lock);
61
62/* HCI device list */
63LIST_HEAD(hci_dev_list);
64DEFINE_RWLOCK(hci_dev_list_lock);
65
66/* HCI callback list */
67LIST_HEAD(hci_cb_list);
68DEFINE_RWLOCK(hci_cb_list_lock);
69
70/* HCI protocols */
71#define HCI_MAX_PROTO 2
72struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74/* HCI notifiers list */
75static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77/* ---- HCI notifications ---- */
78
79int hci_register_notifier(struct notifier_block *nb)
80{
81 return atomic_notifier_chain_register(&hci_notifier, nb);
82}
83
84int hci_unregister_notifier(struct notifier_block *nb)
85{
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87}
88
89static void hci_notify(struct hci_dev *hdev, int event)
90{
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92}
93
94/* ---- HCI requests ---- */
95
96void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
97{
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100 /* If the request has set req_last_cmd (typical for multi-HCI
101 * command requests), check whether the completed command matches
102 * it, and if not just return. Single HCI command requests
103 * typically leave req_last_cmd as 0. */
104 if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
105 return;
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_err(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170{
171 int ret;
172
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190}
191
192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
193{
194 struct sk_buff *skb;
195 __le16 param;
196 __u8 flt_type;
197
198 BT_DBG("%s %ld", hdev->name, opt);
199
200 /* Driver initialization */
201
202 /* Special commands */
203 while ((skb = skb_dequeue(&hdev->driver_init))) {
204 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
205 skb->dev = (void *) hdev;
206
207 skb_queue_tail(&hdev->cmd_q, skb);
208 tasklet_schedule(&hdev->cmd_task);
209 }
210 skb_queue_purge(&hdev->driver_init);
211
212 /* Mandatory initialization */
213
214 /* Reset */
215 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
216 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
217
218 /* Read Local Supported Features */
219 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
220
221 /* Read Local Version */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
225 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
226
227#if 0
228 /* Host buffer size */
229 {
230 struct hci_cp_host_buffer_size cp;
231 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
232 cp.sco_mtu = HCI_MAX_SCO_SIZE;
233 cp.acl_max_pkt = cpu_to_le16(0xffff);
234 cp.sco_max_pkt = cpu_to_le16(0xffff);
235 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
236 }
237#endif
238
239 /* Read BD Address */
240 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
241
242 /* Read Class of Device */
243 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
244
245 /* Read Local Name */
246 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
247
248 /* Read Voice Setting */
249 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
250
251 /* Optional initialization */
252
253 /* Clear Event Filters */
254 flt_type = HCI_FLT_CLEAR_ALL;
255 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
256
257 /* Page timeout ~20 secs */
258 param = cpu_to_le16(0x8000);
259 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
260
261 /* Connection accept timeout ~20 secs */
262 param = cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264
265 hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
266}
267
268static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
269{
270 __u8 scan = opt;
271
272 BT_DBG("%s %x", hdev->name, scan);
273
274 /* Inquiry and Page scans */
275 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
276}
277
278static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 auth = opt;
281
282 BT_DBG("%s %x", hdev->name, auth);
283
284 /* Authentication */
285 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
286}
287
288static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 encrypt = opt;
291
292 BT_DBG("%s %x", hdev->name, encrypt);
293
294 /* Encryption */
295 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
296}
297
298static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __le16 policy = cpu_to_le16(opt);
301
302 BT_DBG("%s %x", hdev->name, policy);
303
304 /* Default link policy */
305 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
306}
307
308/* Get HCI device by index.
309 * Device is held on return. */
310struct hci_dev *hci_dev_get(int index)
311{
312 struct hci_dev *hdev = NULL;
313 struct list_head *p;
314
315 BT_DBG("%d", index);
316
317 if (index < 0)
318 return NULL;
319
320 read_lock(&hci_dev_list_lock);
321 list_for_each(p, &hci_dev_list) {
322 struct hci_dev *d = list_entry(p, struct hci_dev, list);
323 if (d->id == index) {
324 hdev = hci_dev_hold(d);
325 break;
326 }
327 }
328 read_unlock(&hci_dev_list_lock);
329 return hdev;
330}
331
332/* ---- Inquiry support ---- */
333static void inquiry_cache_flush(struct hci_dev *hdev)
334{
335 struct inquiry_cache *cache = &hdev->inq_cache;
336 struct inquiry_entry *next = cache->list, *e;
337
338 BT_DBG("cache %p", cache);
339
340 cache->list = NULL;
341 while ((e = next)) {
342 next = e->next;
343 kfree(e);
344 }
345}
346
347struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
348{
349 struct inquiry_cache *cache = &hdev->inq_cache;
350 struct inquiry_entry *e;
351
352 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
353
354 for (e = cache->list; e; e = e->next)
355 if (!bacmp(&e->data.bdaddr, bdaddr))
356 break;
357 return e;
358}
359
360void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
361{
362 struct inquiry_cache *cache = &hdev->inq_cache;
363 struct inquiry_entry *ie;
364
365 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
366
367 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
368 if (!ie) {
369 /* Entry not in the cache. Add new one. */
370 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
371 if (!ie)
372 return;
373
374 ie->next = cache->list;
375 cache->list = ie;
376 }
377
378 memcpy(&ie->data, data, sizeof(*data));
379 ie->timestamp = jiffies;
380 cache->timestamp = jiffies;
381}
382
383static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
384{
385 struct inquiry_cache *cache = &hdev->inq_cache;
386 struct inquiry_info *info = (struct inquiry_info *) buf;
387 struct inquiry_entry *e;
388 int copied = 0;
389
390 for (e = cache->list; e && copied < num; e = e->next, copied++) {
391 struct inquiry_data *data = &e->data;
392 bacpy(&info->bdaddr, &data->bdaddr);
393 info->pscan_rep_mode = data->pscan_rep_mode;
394 info->pscan_period_mode = data->pscan_period_mode;
395 info->pscan_mode = data->pscan_mode;
396 memcpy(info->dev_class, data->dev_class, 3);
397 info->clock_offset = data->clock_offset;
398 info++;
399 }
400
401 BT_DBG("cache %p, copied %d", cache, copied);
402 return copied;
403}
404
405static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
406{
407 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
408 struct hci_cp_inquiry cp;
409
410 BT_DBG("%s", hdev->name);
411
412 if (test_bit(HCI_INQUIRY, &hdev->flags))
413 return;
414
415 /* Start Inquiry */
416 memcpy(&cp.lap, &ir->lap, 3);
417 cp.length = ir->length;
418 cp.num_rsp = ir->num_rsp;
419 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
420}
421
422int hci_inquiry(void __user *arg)
423{
424 __u8 __user *ptr = arg;
425 struct hci_inquiry_req ir;
426 struct hci_dev *hdev;
427 int err = 0, do_inquiry = 0, max_rsp;
428 long timeo;
429 __u8 *buf;
430
431 if (copy_from_user(&ir, ptr, sizeof(ir)))
432 return -EFAULT;
433
434 if (!(hdev = hci_dev_get(ir.dev_id)))
435 return -ENODEV;
436
437 hci_dev_lock_bh(hdev);
438 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
439 inquiry_cache_empty(hdev) ||
440 ir.flags & IREQ_CACHE_FLUSH) {
441 inquiry_cache_flush(hdev);
442 do_inquiry = 1;
443 }
444 hci_dev_unlock_bh(hdev);
445
446 timeo = ir.length * msecs_to_jiffies(2000);
447
448 if (do_inquiry) {
449 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
450 if (err < 0)
451 goto done;
452 }
453
454 /* for an unlimited number of responses we use a buffer with 255 entries */
455 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
456
457 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
458 * copy it to user space.
459 */
460 buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
461 if (!buf) {
462 err = -ENOMEM;
463 goto done;
464 }
465
466 hci_dev_lock_bh(hdev);
467 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
468 hci_dev_unlock_bh(hdev);
469
470 BT_DBG("num_rsp %d", ir.num_rsp);
471
472 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
473 ptr += sizeof(ir);
474 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
475 ir.num_rsp))
476 err = -EFAULT;
477 } else
478 err = -EFAULT;
479
480 kfree(buf);
481
482done:
483 hci_dev_put(hdev);
484 return err;
485}
486
487/* ---- HCI ioctl helpers ---- */
488
489int hci_dev_open(__u16 dev)
490{
491 struct hci_dev *hdev;
492 int ret = 0;
493
494 if (!(hdev = hci_dev_get(dev)))
495 return -ENODEV;
496
497 BT_DBG("%s %p", hdev->name, hdev);
498
499 hci_req_lock(hdev);
500
501 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
502 ret = -ERFKILL;
503 goto done;
504 }
505
506 if (test_bit(HCI_UP, &hdev->flags)) {
507 ret = -EALREADY;
508 goto done;
509 }
510
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
512 set_bit(HCI_RAW, &hdev->flags);
513
514 /* Treat all non BR/EDR controllers as raw devices for now */
515 if (hdev->dev_type != HCI_BREDR)
516 set_bit(HCI_RAW, &hdev->flags);
517
518 if (hdev->open(hdev)) {
519 ret = -EIO;
520 goto done;
521 }
522
523 if (!test_bit(HCI_RAW, &hdev->flags)) {
524 atomic_set(&hdev->cmd_cnt, 1);
525 set_bit(HCI_INIT, &hdev->flags);
526
527 //__hci_request(hdev, hci_reset_req, 0, HZ);
528 ret = __hci_request(hdev, hci_init_req, 0,
529 msecs_to_jiffies(HCI_INIT_TIMEOUT));
530
531 clear_bit(HCI_INIT, &hdev->flags);
532 }
533
534 if (!ret) {
535 hci_dev_hold(hdev);
536 set_bit(HCI_UP, &hdev->flags);
537 hci_notify(hdev, HCI_DEV_UP);
538 if (!test_bit(HCI_SETUP, &hdev->flags))
539 mgmt_powered(hdev->id, 1);
540 } else {
541 /* Init failed, cleanup */
542 tasklet_kill(&hdev->rx_task);
543 tasklet_kill(&hdev->tx_task);
544 tasklet_kill(&hdev->cmd_task);
545
546 skb_queue_purge(&hdev->cmd_q);
547 skb_queue_purge(&hdev->rx_q);
548
549 if (hdev->flush)
550 hdev->flush(hdev);
551
552 if (hdev->sent_cmd) {
553 kfree_skb(hdev->sent_cmd);
554 hdev->sent_cmd = NULL;
555 }
556
557 hdev->close(hdev);
558 hdev->flags = 0;
559 }
560
561done:
562 hci_req_unlock(hdev);
563 hci_dev_put(hdev);
564 return ret;
565}
566
567static int hci_dev_do_close(struct hci_dev *hdev)
568{
569 BT_DBG("%s %p", hdev->name, hdev);
570
571 hci_req_cancel(hdev, ENODEV);
572 hci_req_lock(hdev);
573
574 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
575 hci_req_unlock(hdev);
576 return 0;
577 }
578
579 /* Kill RX and TX tasks */
580 tasklet_kill(&hdev->rx_task);
581 tasklet_kill(&hdev->tx_task);
582
583 hci_dev_lock_bh(hdev);
584 inquiry_cache_flush(hdev);
585 hci_conn_hash_flush(hdev);
586 hci_dev_unlock_bh(hdev);
587
588 hci_notify(hdev, HCI_DEV_DOWN);
589
590 if (hdev->flush)
591 hdev->flush(hdev);
592
593 /* Reset device */
594 skb_queue_purge(&hdev->cmd_q);
595 atomic_set(&hdev->cmd_cnt, 1);
596 if (!test_bit(HCI_RAW, &hdev->flags)) {
597 set_bit(HCI_INIT, &hdev->flags);
598 __hci_request(hdev, hci_reset_req, 0,
599 msecs_to_jiffies(250));
600 clear_bit(HCI_INIT, &hdev->flags);
601 }
602
603 /* Kill cmd task */
604 tasklet_kill(&hdev->cmd_task);
605
606 /* Drop queues */
607 skb_queue_purge(&hdev->rx_q);
608 skb_queue_purge(&hdev->cmd_q);
609 skb_queue_purge(&hdev->raw_q);
610
611 /* Drop last sent command */
612 if (hdev->sent_cmd) {
613 kfree_skb(hdev->sent_cmd);
614 hdev->sent_cmd = NULL;
615 }
616
617 /* After this point our queues are empty
618 * and no tasks are scheduled. */
619 hdev->close(hdev);
620
621 mgmt_powered(hdev->id, 0);
622
623 /* Clear flags */
624 hdev->flags = 0;
625
626 hci_req_unlock(hdev);
627
628 hci_dev_put(hdev);
629 return 0;
630}
631
632int hci_dev_close(__u16 dev)
633{
634 struct hci_dev *hdev;
635 int err;
636
637 hdev = hci_dev_get(dev);
638 if (!hdev)
639 return -ENODEV;
640 err = hci_dev_do_close(hdev);
641 hci_dev_put(hdev);
642 return err;
643}
644
645int hci_dev_reset(__u16 dev)
646{
647 struct hci_dev *hdev;
648 int ret = 0;
649
650 hdev = hci_dev_get(dev);
651 if (!hdev)
652 return -ENODEV;
653
654 hci_req_lock(hdev);
655 tasklet_disable(&hdev->tx_task);
656
657 if (!test_bit(HCI_UP, &hdev->flags))
658 goto done;
659
660 /* Drop queues */
661 skb_queue_purge(&hdev->rx_q);
662 skb_queue_purge(&hdev->cmd_q);
663
664 hci_dev_lock_bh(hdev);
665 inquiry_cache_flush(hdev);
666 hci_conn_hash_flush(hdev);
667 hci_dev_unlock_bh(hdev);
668
669 if (hdev->flush)
670 hdev->flush(hdev);
671
672 atomic_set(&hdev->cmd_cnt, 1);
673 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
674
675 if (!test_bit(HCI_RAW, &hdev->flags))
676 ret = __hci_request(hdev, hci_reset_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
679done:
680 tasklet_enable(&hdev->tx_task);
681 hci_req_unlock(hdev);
682 hci_dev_put(hdev);
683 return ret;
684}
685
686int hci_dev_reset_stat(__u16 dev)
687{
688 struct hci_dev *hdev;
689 int ret = 0;
690
691 hdev = hci_dev_get(dev);
692 if (!hdev)
693 return -ENODEV;
694
695 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
696
697 hci_dev_put(hdev);
698
699 return ret;
700}
701
702int hci_dev_cmd(unsigned int cmd, void __user *arg)
703{
704 struct hci_dev *hdev;
705 struct hci_dev_req dr;
706 int err = 0;
707
708 if (copy_from_user(&dr, arg, sizeof(dr)))
709 return -EFAULT;
710
711 hdev = hci_dev_get(dr.dev_id);
712 if (!hdev)
713 return -ENODEV;
714
715 switch (cmd) {
716 case HCISETAUTH:
717 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
718 msecs_to_jiffies(HCI_INIT_TIMEOUT));
719 break;
720
721 case HCISETENCRYPT:
722 if (!lmp_encrypt_capable(hdev)) {
723 err = -EOPNOTSUPP;
724 break;
725 }
726
727 if (!test_bit(HCI_AUTH, &hdev->flags)) {
728 /* Auth must be enabled first */
729 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
730 msecs_to_jiffies(HCI_INIT_TIMEOUT));
731 if (err)
732 break;
733 }
734
735 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
736 msecs_to_jiffies(HCI_INIT_TIMEOUT));
737 break;
738
739 case HCISETSCAN:
740 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
741 msecs_to_jiffies(HCI_INIT_TIMEOUT));
742 break;
743
744 case HCISETLINKPOL:
745 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
746 msecs_to_jiffies(HCI_INIT_TIMEOUT));
747 break;
748
749 case HCISETLINKMODE:
750 hdev->link_mode = ((__u16) dr.dev_opt) &
751 (HCI_LM_MASTER | HCI_LM_ACCEPT);
752 break;
753
754 case HCISETPTYPE:
755 hdev->pkt_type = (__u16) dr.dev_opt;
756 break;
757
758 case HCISETACLMTU:
759 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
760 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
761 break;
762
763 case HCISETSCOMTU:
764 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
765 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
766 break;
767
768 default:
769 err = -EINVAL;
770 break;
771 }
772
773 hci_dev_put(hdev);
774 return err;
775}
776
777int hci_get_dev_list(void __user *arg)
778{
779 struct hci_dev_list_req *dl;
780 struct hci_dev_req *dr;
781 struct list_head *p;
782 int n = 0, size, err;
783 __u16 dev_num;
784
785 if (get_user(dev_num, (__u16 __user *) arg))
786 return -EFAULT;
787
788 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
789 return -EINVAL;
790
791 size = sizeof(*dl) + dev_num * sizeof(*dr);
792
793 dl = kzalloc(size, GFP_KERNEL);
794 if (!dl)
795 return -ENOMEM;
796
797 dr = dl->dev_req;
798
799 read_lock_bh(&hci_dev_list_lock);
800 list_for_each(p, &hci_dev_list) {
801 struct hci_dev *hdev;
802
803 hdev = list_entry(p, struct hci_dev, list);
804
805 hci_del_off_timer(hdev);
806
807 if (!test_bit(HCI_MGMT, &hdev->flags))
808 set_bit(HCI_PAIRABLE, &hdev->flags);
809
810 (dr + n)->dev_id = hdev->id;
811 (dr + n)->dev_opt = hdev->flags;
812
813 if (++n >= dev_num)
814 break;
815 }
816 read_unlock_bh(&hci_dev_list_lock);
817
818 dl->dev_num = n;
819 size = sizeof(*dl) + n * sizeof(*dr);
820
821 err = copy_to_user(arg, dl, size);
822 kfree(dl);
823
824 return err ? -EFAULT : 0;
825}
826
827int hci_get_dev_info(void __user *arg)
828{
829 struct hci_dev *hdev;
830 struct hci_dev_info di;
831 int err = 0;
832
833 if (copy_from_user(&di, arg, sizeof(di)))
834 return -EFAULT;
835
836 hdev = hci_dev_get(di.dev_id);
837 if (!hdev)
838 return -ENODEV;
839
840 hci_del_off_timer(hdev);
841
842 if (!test_bit(HCI_MGMT, &hdev->flags))
843 set_bit(HCI_PAIRABLE, &hdev->flags);
844
845 strcpy(di.name, hdev->name);
846 di.bdaddr = hdev->bdaddr;
847 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
848 di.flags = hdev->flags;
849 di.pkt_type = hdev->pkt_type;
850 di.acl_mtu = hdev->acl_mtu;
851 di.acl_pkts = hdev->acl_pkts;
852 di.sco_mtu = hdev->sco_mtu;
853 di.sco_pkts = hdev->sco_pkts;
854 di.link_policy = hdev->link_policy;
855 di.link_mode = hdev->link_mode;
856
857 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
858 memcpy(&di.features, &hdev->features, sizeof(di.features));
859
860 if (copy_to_user(arg, &di, sizeof(di)))
861 err = -EFAULT;
862
863 hci_dev_put(hdev);
864
865 return err;
866}
867
868/* ---- Interface to HCI drivers ---- */
869
870static int hci_rfkill_set_block(void *data, bool blocked)
871{
872 struct hci_dev *hdev = data;
873
874 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
875
876 if (!blocked)
877 return 0;
878
879 hci_dev_do_close(hdev);
880
881 return 0;
882}
883
884static const struct rfkill_ops hci_rfkill_ops = {
885 .set_block = hci_rfkill_set_block,
886};
887
888/* Alloc HCI device */
889struct hci_dev *hci_alloc_dev(void)
890{
891 struct hci_dev *hdev;
892
893 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
894 if (!hdev)
895 return NULL;
896
897 skb_queue_head_init(&hdev->driver_init);
898
899 return hdev;
900}
901EXPORT_SYMBOL(hci_alloc_dev);
902
903/* Free HCI device */
904void hci_free_dev(struct hci_dev *hdev)
905{
906 skb_queue_purge(&hdev->driver_init);
907
908 /* will be freed via the device release callback */
909 put_device(&hdev->dev);
910}
911EXPORT_SYMBOL(hci_free_dev);
912
913static void hci_power_on(struct work_struct *work)
914{
915 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
916
917 BT_DBG("%s", hdev->name);
918
919 if (hci_dev_open(hdev->id) < 0)
920 return;
921
922 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
923 mod_timer(&hdev->off_timer,
924 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
925
926 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
927 mgmt_index_added(hdev->id);
928}
929
930static void hci_power_off(struct work_struct *work)
931{
932 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
933
934 BT_DBG("%s", hdev->name);
935
936 hci_dev_close(hdev->id);
937}
938
939static void hci_auto_off(unsigned long data)
940{
941 struct hci_dev *hdev = (struct hci_dev *) data;
942
943 BT_DBG("%s", hdev->name);
944
945 clear_bit(HCI_AUTO_OFF, &hdev->flags);
946
947 queue_work(hdev->workqueue, &hdev->power_off);
948}
949
950void hci_del_off_timer(struct hci_dev *hdev)
951{
952 BT_DBG("%s", hdev->name);
953
954 clear_bit(HCI_AUTO_OFF, &hdev->flags);
955 del_timer(&hdev->off_timer);
956}
957
958/* Register HCI device */
959int hci_register_dev(struct hci_dev *hdev)
960{
961 struct list_head *head = &hci_dev_list, *p;
962 int i, id = 0;
963
964 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
965 hdev->bus, hdev->owner);
966
967 if (!hdev->open || !hdev->close || !hdev->destruct)
968 return -EINVAL;
969
970 write_lock_bh(&hci_dev_list_lock);
971
972 /* Find first available device id */
973 list_for_each(p, &hci_dev_list) {
974 if (list_entry(p, struct hci_dev, list)->id != id)
975 break;
976 head = p; id++;
977 }
978
979 sprintf(hdev->name, "hci%d", id);
980 hdev->id = id;
981 list_add(&hdev->list, head);
982
983 atomic_set(&hdev->refcnt, 1);
984 spin_lock_init(&hdev->lock);
985
986 hdev->flags = 0;
987 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
988 hdev->esco_type = (ESCO_HV1);
989 hdev->link_mode = (HCI_LM_ACCEPT);
990
991 hdev->idle_timeout = 0;
992 hdev->sniff_max_interval = 800;
993 hdev->sniff_min_interval = 80;
994
995 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
996 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
997 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
998
999 skb_queue_head_init(&hdev->rx_q);
1000 skb_queue_head_init(&hdev->cmd_q);
1001 skb_queue_head_init(&hdev->raw_q);
1002
1003 for (i = 0; i < NUM_REASSEMBLY; i++)
1004 hdev->reassembly[i] = NULL;
1005
1006 init_waitqueue_head(&hdev->req_wait_q);
1007 mutex_init(&hdev->req_lock);
1008
1009 inquiry_cache_init(hdev);
1010
1011 hci_conn_hash_init(hdev);
1012
1013 INIT_LIST_HEAD(&hdev->blacklist);
1014
1015 INIT_WORK(&hdev->power_on, hci_power_on);
1016 INIT_WORK(&hdev->power_off, hci_power_off);
1017 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1018
1019 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1020
1021 atomic_set(&hdev->promisc, 0);
1022
1023 write_unlock_bh(&hci_dev_list_lock);
1024
1025 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1026 if (!hdev->workqueue)
1027 goto nomem;
1028
1029 hci_register_sysfs(hdev);
1030
1031 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1032 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1033 if (hdev->rfkill) {
1034 if (rfkill_register(hdev->rfkill) < 0) {
1035 rfkill_destroy(hdev->rfkill);
1036 hdev->rfkill = NULL;
1037 }
1038 }
1039
1040 set_bit(HCI_AUTO_OFF, &hdev->flags);
1041 set_bit(HCI_SETUP, &hdev->flags);
1042 queue_work(hdev->workqueue, &hdev->power_on);
1043
1044 hci_notify(hdev, HCI_DEV_REG);
1045
1046 return id;
1047
1048nomem:
1049 write_lock_bh(&hci_dev_list_lock);
1050 list_del(&hdev->list);
1051 write_unlock_bh(&hci_dev_list_lock);
1052
1053 return -ENOMEM;
1054}
1055EXPORT_SYMBOL(hci_register_dev);
1056
1057/* Unregister HCI device */
1058int hci_unregister_dev(struct hci_dev *hdev)
1059{
1060 int i;
1061
1062 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1063
1064 write_lock_bh(&hci_dev_list_lock);
1065 list_del(&hdev->list);
1066 write_unlock_bh(&hci_dev_list_lock);
1067
1068 hci_dev_do_close(hdev);
1069
1070 for (i = 0; i < NUM_REASSEMBLY; i++)
1071 kfree_skb(hdev->reassembly[i]);
1072
1073 if (!test_bit(HCI_INIT, &hdev->flags) &&
1074 !test_bit(HCI_SETUP, &hdev->flags))
1075 mgmt_index_removed(hdev->id);
1076
1077 hci_notify(hdev, HCI_DEV_UNREG);
1078
1079 if (hdev->rfkill) {
1080 rfkill_unregister(hdev->rfkill);
1081 rfkill_destroy(hdev->rfkill);
1082 }
1083
1084 hci_unregister_sysfs(hdev);
1085
1086 destroy_workqueue(hdev->workqueue);
1087
1088 hci_dev_lock_bh(hdev);
1089 hci_blacklist_clear(hdev);
1090 hci_dev_unlock_bh(hdev);
1091
1092 __hci_dev_put(hdev);
1093
1094 return 0;
1095}
1096EXPORT_SYMBOL(hci_unregister_dev);
1097
1098/* Suspend HCI device */
1099int hci_suspend_dev(struct hci_dev *hdev)
1100{
1101 hci_notify(hdev, HCI_DEV_SUSPEND);
1102 return 0;
1103}
1104EXPORT_SYMBOL(hci_suspend_dev);
1105
1106/* Resume HCI device */
1107int hci_resume_dev(struct hci_dev *hdev)
1108{
1109 hci_notify(hdev, HCI_DEV_RESUME);
1110 return 0;
1111}
1112EXPORT_SYMBOL(hci_resume_dev);
1113
1114/* Receive frame from HCI drivers */
1115int hci_recv_frame(struct sk_buff *skb)
1116{
1117 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1118 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1119 && !test_bit(HCI_INIT, &hdev->flags))) {
1120 kfree_skb(skb);
1121 return -ENXIO;
1122 }
1123
1124 /* Incoming skb */
1125 bt_cb(skb)->incoming = 1;
1126
1127 /* Time stamp */
1128 __net_timestamp(skb);
1129
1130 /* Queue frame for rx task */
1131 skb_queue_tail(&hdev->rx_q, skb);
1132 tasklet_schedule(&hdev->rx_task);
1133
1134 return 0;
1135}
1136EXPORT_SYMBOL(hci_recv_frame);
1137
1138static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1139 int count, __u8 index, gfp_t gfp_mask)
1140{
1141 int len = 0;
1142 int hlen = 0;
1143 int remain = count;
1144 struct sk_buff *skb;
1145 struct bt_skb_cb *scb;
1146
1147 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1148 index >= NUM_REASSEMBLY)
1149 return -EILSEQ;
1150
1151 skb = hdev->reassembly[index];
1152
1153 if (!skb) {
1154 switch (type) {
1155 case HCI_ACLDATA_PKT:
1156 len = HCI_MAX_FRAME_SIZE;
1157 hlen = HCI_ACL_HDR_SIZE;
1158 break;
1159 case HCI_EVENT_PKT:
1160 len = HCI_MAX_EVENT_SIZE;
1161 hlen = HCI_EVENT_HDR_SIZE;
1162 break;
1163 case HCI_SCODATA_PKT:
1164 len = HCI_MAX_SCO_SIZE;
1165 hlen = HCI_SCO_HDR_SIZE;
1166 break;
1167 }
1168
1169 skb = bt_skb_alloc(len, gfp_mask);
1170 if (!skb)
1171 return -ENOMEM;
1172
1173 scb = (void *) skb->cb;
1174 scb->expect = hlen;
1175 scb->pkt_type = type;
1176
1177 skb->dev = (void *) hdev;
1178 hdev->reassembly[index] = skb;
1179 }
1180
1181 while (count) {
1182 scb = (void *) skb->cb;
1183 len = min(scb->expect, (__u16)count);
1184
1185 memcpy(skb_put(skb, len), data, len);
1186
1187 count -= len;
1188 data += len;
1189 scb->expect -= len;
1190 remain = count;
1191
1192 switch (type) {
1193 case HCI_EVENT_PKT:
1194 if (skb->len == HCI_EVENT_HDR_SIZE) {
1195 struct hci_event_hdr *h = hci_event_hdr(skb);
1196 scb->expect = h->plen;
1197
1198 if (skb_tailroom(skb) < scb->expect) {
1199 kfree_skb(skb);
1200 hdev->reassembly[index] = NULL;
1201 return -ENOMEM;
1202 }
1203 }
1204 break;
1205
1206 case HCI_ACLDATA_PKT:
1207 if (skb->len == HCI_ACL_HDR_SIZE) {
1208 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1209 scb->expect = __le16_to_cpu(h->dlen);
1210
1211 if (skb_tailroom(skb) < scb->expect) {
1212 kfree_skb(skb);
1213 hdev->reassembly[index] = NULL;
1214 return -ENOMEM;
1215 }
1216 }
1217 break;
1218
1219 case HCI_SCODATA_PKT:
1220 if (skb->len == HCI_SCO_HDR_SIZE) {
1221 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1222 scb->expect = h->dlen;
1223
1224 if (skb_tailroom(skb) < scb->expect) {
1225 kfree_skb(skb);
1226 hdev->reassembly[index] = NULL;
1227 return -ENOMEM;
1228 }
1229 }
1230 break;
1231 }
1232
1233 if (scb->expect == 0) {
1234 /* Complete frame */
1235
1236 bt_cb(skb)->pkt_type = type;
1237 hci_recv_frame(skb);
1238
1239 hdev->reassembly[index] = NULL;
1240 return remain;
1241 }
1242 }
1243
1244 return remain;
1245}
1246
1247int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1248{
1249 int rem = 0;
1250
1251 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1252 return -EILSEQ;
1253
1254 while (count) {
1255 rem = hci_reassembly(hdev, type, data, count,
1256 type - 1, GFP_ATOMIC);
1257 if (rem < 0)
1258 return rem;
1259
1260 data += (count - rem);
1261 count = rem;
1262 };
1263
1264 return rem;
1265}
1266EXPORT_SYMBOL(hci_recv_fragment);
1267
1268#define STREAM_REASSEMBLY 0
1269
1270int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1271{
1272 int type;
1273 int rem = 0;
1274
1275 while (count) {
1276 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1277
1278 if (!skb) {
1279 struct { char type; } *pkt;
1280
1281 /* Start of the frame */
1282 pkt = data;
1283 type = pkt->type;
1284
1285 data++;
1286 count--;
1287 } else
1288 type = bt_cb(skb)->pkt_type;
1289
1290 rem = hci_reassembly(hdev, type, data,
1291 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1292 if (rem < 0)
1293 return rem;
1294
1295 data += (count - rem);
1296 count = rem;
1297 };
1298
1299 return rem;
1300}
1301EXPORT_SYMBOL(hci_recv_stream_fragment);
1302
1303/* ---- Interface to upper protocols ---- */
1304
1305/* Register/Unregister protocols.
1306 * hci_task_lock is used to ensure that no tasks are running. */
1307int hci_register_proto(struct hci_proto *hp)
1308{
1309 int err = 0;
1310
1311 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1312
1313 if (hp->id >= HCI_MAX_PROTO)
1314 return -EINVAL;
1315
1316 write_lock_bh(&hci_task_lock);
1317
1318 if (!hci_proto[hp->id])
1319 hci_proto[hp->id] = hp;
1320 else
1321 err = -EEXIST;
1322
1323 write_unlock_bh(&hci_task_lock);
1324
1325 return err;
1326}
1327EXPORT_SYMBOL(hci_register_proto);
1328
1329int hci_unregister_proto(struct hci_proto *hp)
1330{
1331 int err = 0;
1332
1333 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1334
1335 if (hp->id >= HCI_MAX_PROTO)
1336 return -EINVAL;
1337
1338 write_lock_bh(&hci_task_lock);
1339
1340 if (hci_proto[hp->id])
1341 hci_proto[hp->id] = NULL;
1342 else
1343 err = -ENOENT;
1344
1345 write_unlock_bh(&hci_task_lock);
1346
1347 return err;
1348}
1349EXPORT_SYMBOL(hci_unregister_proto);
1350
1351int hci_register_cb(struct hci_cb *cb)
1352{
1353 BT_DBG("%p name %s", cb, cb->name);
1354
1355 write_lock_bh(&hci_cb_list_lock);
1356 list_add(&cb->list, &hci_cb_list);
1357 write_unlock_bh(&hci_cb_list_lock);
1358
1359 return 0;
1360}
1361EXPORT_SYMBOL(hci_register_cb);
1362
1363int hci_unregister_cb(struct hci_cb *cb)
1364{
1365 BT_DBG("%p name %s", cb, cb->name);
1366
1367 write_lock_bh(&hci_cb_list_lock);
1368 list_del(&cb->list);
1369 write_unlock_bh(&hci_cb_list_lock);
1370
1371 return 0;
1372}
1373EXPORT_SYMBOL(hci_unregister_cb);
1374
1375static int hci_send_frame(struct sk_buff *skb)
1376{
1377 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1378
1379 if (!hdev) {
1380 kfree_skb(skb);
1381 return -ENODEV;
1382 }
1383
1384 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1385
1386 if (atomic_read(&hdev->promisc)) {
1387 /* Time stamp */
1388 __net_timestamp(skb);
1389
1390 hci_send_to_sock(hdev, skb, NULL);
1391 }
1392
1393 /* Get rid of skb owner, prior to sending to the driver. */
1394 skb_orphan(skb);
1395
1396 return hdev->send(skb);
1397}
1398
1399/* Send HCI command */
1400int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1401{
1402 int len = HCI_COMMAND_HDR_SIZE + plen;
1403 struct hci_command_hdr *hdr;
1404 struct sk_buff *skb;
1405
1406 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1407
1408 skb = bt_skb_alloc(len, GFP_ATOMIC);
1409 if (!skb) {
1410 BT_ERR("%s no memory for command", hdev->name);
1411 return -ENOMEM;
1412 }
1413
1414 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1415 hdr->opcode = cpu_to_le16(opcode);
1416 hdr->plen = plen;
1417
1418 if (plen)
1419 memcpy(skb_put(skb, plen), param, plen);
1420
1421 BT_DBG("skb len %d", skb->len);
1422
1423 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1424 skb->dev = (void *) hdev;
1425
1426 skb_queue_tail(&hdev->cmd_q, skb);
1427 tasklet_schedule(&hdev->cmd_task);
1428
1429 return 0;
1430}
1431
1432/* Get data from the previously sent command */
1433void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1434{
1435 struct hci_command_hdr *hdr;
1436
1437 if (!hdev->sent_cmd)
1438 return NULL;
1439
1440 hdr = (void *) hdev->sent_cmd->data;
1441
1442 if (hdr->opcode != cpu_to_le16(opcode))
1443 return NULL;
1444
1445 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1446
1447 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1448}
1449
1450/* Send ACL data */
1451static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1452{
1453 struct hci_acl_hdr *hdr;
1454 int len = skb->len;
1455
1456 skb_push(skb, HCI_ACL_HDR_SIZE);
1457 skb_reset_transport_header(skb);
1458 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1459 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1460 hdr->dlen = cpu_to_le16(len);
1461}
1462
1463void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1464{
1465 struct hci_dev *hdev = conn->hdev;
1466 struct sk_buff *list;
1467
1468 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1469
1470 skb->dev = (void *) hdev;
1471 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1472 hci_add_acl_hdr(skb, conn->handle, flags);
1473
1474 list = skb_shinfo(skb)->frag_list;
1475 if (!list) {
1476 /* Non fragmented */
1477 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1478
1479 skb_queue_tail(&conn->data_q, skb);
1480 } else {
1481 /* Fragmented */
1482 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1483
1484 skb_shinfo(skb)->frag_list = NULL;
1485
1486 /* Queue all fragments atomically */
1487 spin_lock_bh(&conn->data_q.lock);
1488
1489 __skb_queue_tail(&conn->data_q, skb);
1490
1491 flags &= ~ACL_START;
1492 flags |= ACL_CONT;
1493 do {
1494 skb = list; list = list->next;
1495
1496 skb->dev = (void *) hdev;
1497 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1498 hci_add_acl_hdr(skb, conn->handle, flags);
1499
1500 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1501
1502 __skb_queue_tail(&conn->data_q, skb);
1503 } while (list);
1504
1505 spin_unlock_bh(&conn->data_q.lock);
1506 }
1507
1508 tasklet_schedule(&hdev->tx_task);
1509}
1510EXPORT_SYMBOL(hci_send_acl);
1511
1512/* Send SCO data */
1513void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1514{
1515 struct hci_dev *hdev = conn->hdev;
1516 struct hci_sco_hdr hdr;
1517
1518 BT_DBG("%s len %d", hdev->name, skb->len);
1519
1520 hdr.handle = cpu_to_le16(conn->handle);
1521 hdr.dlen = skb->len;
1522
1523 skb_push(skb, HCI_SCO_HDR_SIZE);
1524 skb_reset_transport_header(skb);
1525 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1526
1527 skb->dev = (void *) hdev;
1528 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1529
1530 skb_queue_tail(&conn->data_q, skb);
1531 tasklet_schedule(&hdev->tx_task);
1532}
1533EXPORT_SYMBOL(hci_send_sco);
1534
1535/* ---- HCI TX task (outgoing data) ---- */
1536
1537/* HCI Connection scheduler */
1538static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1539{
1540 struct hci_conn_hash *h = &hdev->conn_hash;
1541 struct hci_conn *conn = NULL;
1542 int num = 0, min = ~0;
1543 struct list_head *p;
1544
1545 /* We don't have to lock device here. Connections are always
1546 * added and removed with TX task disabled. */
1547 list_for_each(p, &h->list) {
1548 struct hci_conn *c;
1549 c = list_entry(p, struct hci_conn, list);
1550
1551 if (c->type != type || skb_queue_empty(&c->data_q))
1552 continue;
1553
1554 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1555 continue;
1556
1557 num++;
1558
1559 if (c->sent < min) {
1560 min = c->sent;
1561 conn = c;
1562 }
1563 }
1564
1565 if (conn) {
1566 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1567 int q = cnt / num;
1568 *quote = q ? q : 1;
1569 } else
1570 *quote = 0;
1571
1572 BT_DBG("conn %p quote %d", conn, *quote);
1573 return conn;
1574}
1575
1576static inline void hci_acl_tx_to(struct hci_dev *hdev)
1577{
1578 struct hci_conn_hash *h = &hdev->conn_hash;
1579 struct list_head *p;
1580 struct hci_conn *c;
1581
1582 BT_ERR("%s ACL tx timeout", hdev->name);
1583
1584 /* Kill stalled connections */
1585 list_for_each(p, &h->list) {
1586 c = list_entry(p, struct hci_conn, list);
1587 if (c->type == ACL_LINK && c->sent) {
1588 BT_ERR("%s killing stalled ACL connection %s",
1589 hdev->name, batostr(&c->dst));
1590 hci_acl_disconn(c, 0x13);
1591 }
1592 }
1593}
1594
1595static inline void hci_sched_acl(struct hci_dev *hdev)
1596{
1597 struct hci_conn *conn;
1598 struct sk_buff *skb;
1599 int quote;
1600
1601 BT_DBG("%s", hdev->name);
1602
1603 if (!test_bit(HCI_RAW, &hdev->flags)) {
1604 /* ACL tx timeout must be longer than maximum
1605 * link supervision timeout (40.9 seconds) */
1606 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1607 hci_acl_tx_to(hdev);
1608 }
1609
1610 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1612 BT_DBG("skb %p len %d", skb, skb->len);
1613
1614 hci_conn_enter_active_mode(conn);
1615
1616 hci_send_frame(skb);
1617 hdev->acl_last_tx = jiffies;
1618
1619 hdev->acl_cnt--;
1620 conn->sent++;
1621 }
1622 }
1623}
1624
1625/* Schedule SCO */
1626static inline void hci_sched_sco(struct hci_dev *hdev)
1627{
1628 struct hci_conn *conn;
1629 struct sk_buff *skb;
1630 int quote;
1631
1632 BT_DBG("%s", hdev->name);
1633
1634 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1635 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1636 BT_DBG("skb %p len %d", skb, skb->len);
1637 hci_send_frame(skb);
1638
1639 conn->sent++;
1640 if (conn->sent == ~0)
1641 conn->sent = 0;
1642 }
1643 }
1644}
1645
1646static inline void hci_sched_esco(struct hci_dev *hdev)
1647{
1648 struct hci_conn *conn;
1649 struct sk_buff *skb;
1650 int quote;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1655 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1656 BT_DBG("skb %p len %d", skb, skb->len);
1657 hci_send_frame(skb);
1658
1659 conn->sent++;
1660 if (conn->sent == ~0)
1661 conn->sent = 0;
1662 }
1663 }
1664}
1665
1666static void hci_tx_task(unsigned long arg)
1667{
1668 struct hci_dev *hdev = (struct hci_dev *) arg;
1669 struct sk_buff *skb;
1670
1671 read_lock(&hci_task_lock);
1672
1673 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1674
1675 /* Schedule queues and send stuff to HCI driver */
1676
1677 hci_sched_acl(hdev);
1678
1679 hci_sched_sco(hdev);
1680
1681 hci_sched_esco(hdev);
1682
1683 /* Send next queued raw (unknown type) packet */
1684 while ((skb = skb_dequeue(&hdev->raw_q)))
1685 hci_send_frame(skb);
1686
1687 read_unlock(&hci_task_lock);
1688}
1689
1690/* ----- HCI RX task (incoming data processing) ----- */
1691
1692/* ACL data packet */
1693static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1694{
1695 struct hci_acl_hdr *hdr = (void *) skb->data;
1696 struct hci_conn *conn;
1697 __u16 handle, flags;
1698
1699 skb_pull(skb, HCI_ACL_HDR_SIZE);
1700
1701 handle = __le16_to_cpu(hdr->handle);
1702 flags = hci_flags(handle);
1703 handle = hci_handle(handle);
1704
1705 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1706
1707 hdev->stat.acl_rx++;
1708
1709 hci_dev_lock(hdev);
1710 conn = hci_conn_hash_lookup_handle(hdev, handle);
1711 hci_dev_unlock(hdev);
1712
1713 if (conn) {
1714 register struct hci_proto *hp;
1715
1716 hci_conn_enter_active_mode(conn);
1717
1718 /* Send to upper protocol */
1719 hp = hci_proto[HCI_PROTO_L2CAP];
1720 if (hp && hp->recv_acldata) {
1721 hp->recv_acldata(conn, skb, flags);
1722 return;
1723 }
1724 } else {
1725 BT_ERR("%s ACL packet for unknown connection handle %d",
1726 hdev->name, handle);
1727 }
1728
1729 kfree_skb(skb);
1730}
1731
1732/* SCO data packet */
1733static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1734{
1735 struct hci_sco_hdr *hdr = (void *) skb->data;
1736 struct hci_conn *conn;
1737 __u16 handle;
1738
1739 skb_pull(skb, HCI_SCO_HDR_SIZE);
1740
1741 handle = __le16_to_cpu(hdr->handle);
1742
1743 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1744
1745 hdev->stat.sco_rx++;
1746
1747 hci_dev_lock(hdev);
1748 conn = hci_conn_hash_lookup_handle(hdev, handle);
1749 hci_dev_unlock(hdev);
1750
1751 if (conn) {
1752 register struct hci_proto *hp;
1753
1754 /* Send to upper protocol */
1755 hp = hci_proto[HCI_PROTO_SCO];
1756 if (hp && hp->recv_scodata) {
1757 hp->recv_scodata(conn, skb);
1758 return;
1759 }
1760 } else {
1761 BT_ERR("%s SCO packet for unknown connection handle %d",
1762 hdev->name, handle);
1763 }
1764
1765 kfree_skb(skb);
1766}
1767
1768static void hci_rx_task(unsigned long arg)
1769{
1770 struct hci_dev *hdev = (struct hci_dev *) arg;
1771 struct sk_buff *skb;
1772
1773 BT_DBG("%s", hdev->name);
1774
1775 read_lock(&hci_task_lock);
1776
1777 while ((skb = skb_dequeue(&hdev->rx_q))) {
1778 if (atomic_read(&hdev->promisc)) {
1779 /* Send copy to the sockets */
1780 hci_send_to_sock(hdev, skb, NULL);
1781 }
1782
1783 if (test_bit(HCI_RAW, &hdev->flags)) {
1784 kfree_skb(skb);
1785 continue;
1786 }
1787
1788 if (test_bit(HCI_INIT, &hdev->flags)) {
1789 /* Don't process data packets in this state. */
1790 switch (bt_cb(skb)->pkt_type) {
1791 case HCI_ACLDATA_PKT:
1792 case HCI_SCODATA_PKT:
1793 kfree_skb(skb);
1794 continue;
1795 }
1796 }
1797
1798 /* Process frame */
1799 switch (bt_cb(skb)->pkt_type) {
1800 case HCI_EVENT_PKT:
1801 hci_event_packet(hdev, skb);
1802 break;
1803
1804 case HCI_ACLDATA_PKT:
1805 BT_DBG("%s ACL data packet", hdev->name);
1806 hci_acldata_packet(hdev, skb);
1807 break;
1808
1809 case HCI_SCODATA_PKT:
1810 BT_DBG("%s SCO data packet", hdev->name);
1811 hci_scodata_packet(hdev, skb);
1812 break;
1813
1814 default:
1815 kfree_skb(skb);
1816 break;
1817 }
1818 }
1819
1820 read_unlock(&hci_task_lock);
1821}
1822
1823static void hci_cmd_task(unsigned long arg)
1824{
1825 struct hci_dev *hdev = (struct hci_dev *) arg;
1826 struct sk_buff *skb;
1827
1828 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1829
1830 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1831 BT_ERR("%s command tx timeout", hdev->name);
1832 atomic_set(&hdev->cmd_cnt, 1);
1833 }
1834
1835 /* Send queued commands */
1836 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1837 kfree_skb(hdev->sent_cmd);
1838
1839 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1840 if (hdev->sent_cmd) {
1841 atomic_dec(&hdev->cmd_cnt);
1842 hci_send_frame(skb);
1843 hdev->cmd_last_tx = jiffies;
1844 } else {
1845 skb_queue_head(&hdev->cmd_q, skb);
1846 tasklet_schedule(&hdev->cmd_task);
1847 }
1848 }
1849}