Bluetooth: Add set_io_capability management command
net/bluetooth/hci_core.c  [GitHub/mt8127/android_kernel_alcatel_ttab.git]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

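/*
 * Typical usage (a sketch): the synchronous ioctl helpers below wrap a
 * one-shot request callback in hci_request(), e.g.
 *
 *	hci_request(hdev, hci_scan_req, dr.dev_opt,
 *			msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The callback only queues HCI commands; completion is signalled from
 * the event path via hci_req_complete(), which wakes req_wait_q.
 */
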
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

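/*
 * Note: while HCI_INIT is set, every command queued above is recorded in
 * hdev->init_last_cmd (see hci_send_cmd), so hci_req_complete() only
 * finishes the init request once the last queued command has completed.
 */
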
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

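/*
 * The inquiry cache is a simple singly linked list protected by the
 * device lock: entries are updated in place for known addresses and the
 * whole list is flushed once it goes stale (see hci_inquiry below).
 */
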
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

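	/* For the MTU ioctls below, dev_opt carries two packed __u16
	 * values: treated as a __u16 array, element 0 is the packet
	 * count and element 1 is the MTU. */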
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

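/*
 * Auto-off flow: hci_register_dev() sets HCI_AUTO_OFF/HCI_SETUP and
 * queues power_on. Unless userspace touches the adapter within
 * AUTO_OFF_TIMEOUT (2 s) - hci_del_off_timer() is called from
 * hci_get_dev_list() and hci_get_dev_info() - hci_auto_off() fires and
 * the adapter is powered back down via the power_off work.
 */
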
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

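/*
 * Link key type 0x06 is "Changed Combination Key" in the Bluetooth spec:
 * the key material is new but its security properties are not, so
 * hci_add_link_key() above restores the previous key type after
 * notifying the management interface.
 */
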
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
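	/* Default to NoInputNoOutput; the mgmt set_io_capability command
	 * introduced by this patch lets userspace change it. */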
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
					int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

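/*
 * hci_reassembly() is a byte-stream state machine: scb->expect tracks
 * how many bytes are still missing. The packet header is collected
 * first to learn the payload length; once expect reaches zero the
 * completed skb is handed to hci_recv_frame(). The return value is the
 * number of input bytes left unconsumed.
 */
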
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

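/*
 * Usage sketch (taken from hci_scan_req above):
 *
 *	__u8 scan = opt;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_task() transmits it once the
 * controller has a free command slot.
 */
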
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

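/*
 * Scheduler fairness: hci_low_sent() returns the connection of the given
 * type with the fewest packets in flight, and sets *quote to the free
 * controller buffers divided evenly among connections with queued data
 * (minimum 1), so a single busy link cannot starve the others.
 */
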
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

1da177e4
LT
1773static void hci_tx_task(unsigned long arg)
1774{
1775 struct hci_dev *hdev = (struct hci_dev *) arg;
1776 struct sk_buff *skb;
1777
1778 read_lock(&hci_task_lock);
1779
1780 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1781
1782 /* Schedule queues and send stuff to HCI driver */
1783
1784 hci_sched_acl(hdev);
1785
1786 hci_sched_sco(hdev);
1787
b6a0dc82
MH
1788 hci_sched_esco(hdev);
1789
1da177e4
LT
1790 /* Send next queued raw (unknown type) packet */
1791 while ((skb = skb_dequeue(&hdev->raw_q)))
1792 hci_send_frame(skb);
1793
1794 read_unlock(&hci_task_lock);
1795}
1796
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
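
/*
 * Command flow control: cmd_cnt mirrors the controller's free command
 * slots and is credited back by the event handler on command completion.
 * A clone of the submitted command is kept in hdev->sent_cmd so that
 * hci_sent_cmd_data() can recover its parameters, and a one second stall
 * with no credit resets the counter as a last resort.
 */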