Bluetooth: Add class of device control to the management interface
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000	/* msecs */

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

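/*
 * Synchronous request machinery: a request callback queues one or more
 * HCI commands, then the caller sleeps on req_wait_q until the matching
 * command completes (hci_req_complete()), the request is cancelled
 * (hci_req_cancel()), or the timeout expires.
 */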
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

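/*
 * The inquiry cache is a simple singly linked list of the most recent
 * inquiry results, keyed by bdaddr and timestamped so that stale
 * entries can be flushed before starting a new inquiry.
 */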
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two 16-bit values:
	 * one halfword carries the MTU, the other the packet count. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

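/*
 * Power control for the management interface: a freshly registered
 * adapter is powered on from the workqueue with HCI_AUTO_OFF set;
 * unless userspace claims the adapter in time (which clears the flag
 * via hci_del_off_timer()), off_timer schedules hci_power_off() again
 * after AUTO_OFF_TIMEOUT msecs.
 */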
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

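/*
 * Incrementally reassemble one HCI packet from a byte stream: allocate
 * an skb sized for the worst case of the packet type, copy bytes until
 * the header is complete, then shrink the expected length to the payload
 * size announced in the header. Returns the number of input bytes left
 * over once a complete frame has been handed to hci_recv_frame().
 */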
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
			index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

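/*
 * Stream transports (for example UART-style drivers) prepend each packet
 * with a one byte packet type indicator. Strip that byte at the start of
 * every frame and feed the rest through the common reassembly slot.
 */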
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

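/*
 * Usage example (a sketch, mirroring hci_scan_req() above): callers pass
 * the opcode, parameter length and a pointer to the parameter block:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_task() actually sends it once
 * the controller has room for another command.
 */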
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

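/*
 * Higher layers may hand us a frame with additional fragments chained
 * on frag_list. The first fragment keeps the caller's ACL_START flags;
 * every continuation is re-flagged ACL_CONT, and all fragments are
 * queued atomically so the TX scheduler never sees a partial frame.
 */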
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
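/*
 * Fair scheduling: pick the connection of the requested type with the
 * fewest packets already outstanding in the controller, and grant it a
 * quota of the free controller buffer slots divided across all ready
 * connections (minimum 1), so one busy link cannot starve the rest.
 */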
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

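/*
 * Command flow control: cmd_cnt tracks how many commands the controller
 * will currently accept (credit is returned by the event path when a
 * command completes). If no credit comes back within a second of the
 * last transmitted command, assume the controller is stuck and reset
 * the counter so queued commands can make progress again.
 */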
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}