Bluetooth: Implement automatic setup procedure for local adapters
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
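/* AUTO_OFF_TIMEOUT is the grace period, in milliseconds, that
 * hci_power_on() grants a freshly registered adapter before
 * hci_auto_off() powers it back down if nothing has claimed it. */
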
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If the request has set req_last_cmd (typical for multi-HCI
         * command requests) check if the completed command matches
         * this, and if not just return. Single HCI command requests
         * typically leave req_last_cmd as 0 */
        if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

125/* Execute request and wait for completion. */
8e87d142 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
1da177e4
LT
127 unsigned long opt, __u32 timeout)
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_err(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
3ff50b79 159 }
1da177e4 160
23bb5763 161 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
1da177e4
LT
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170{
171 int ret;
172
7c6a329e
MH
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
1da177e4
LT
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
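/* Usage sketch (hypothetical caller; my_scan_req is illustrative and not
 * part of this file): a request callback only queues HCI commands, and
 * hci_request() then sleeps on req_wait_q until hci_req_complete()
 * reports a result or the timeout expires:
 *
 *      static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *      {
 *              __u8 scan = opt;
 *              hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      }
 *
 *      err = hci_request(hdev, my_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */
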
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
}

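/* hci_init_req() queues a whole batch of commands, so it records the last
 * opcode in hdev->req_last_cmd; hci_req_complete() will then only wake the
 * __hci_request() waiter once HCI_OP_WRITE_CA_TIMEOUT completes, rather
 * than after the first command of the batch. */
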
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we will use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

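/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on an HCI
 * socket; roughly (sketch, error handling omitted, GIAC LAP 0x9e8b33):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255 } };
 *
 *      memcpy(req.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3);
 *      ioctl(sock, HCIINQUIRY, &req);
 */
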
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                hci_del_off_timer(hdev);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

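/* Together these four functions implement the automatic setup procedure
 * for local adapters: hci_register_dev() below sets HCI_AUTO_OFF and
 * HCI_SETUP and queues the power_on work, so a new controller is brought
 * up and initialized without userspace involvement. If nothing claims the
 * adapter (the ioctl paths above call hci_del_off_timer()), hci_auto_off()
 * fires after AUTO_OFF_TIMEOUT and queues the power_off work to shut it
 * down again. Once the initial bring-up finishes, hci_power_on() clears
 * HCI_SETUP and announces the new controller to the management interface
 * via mgmt_index_added(). */
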
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

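/* Driver-side usage sketch (hypothetical transport driver; names like
 * my_open/my_send are illustrative, not part of this file):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus      = HCI_USB;
 *      hdev->owner    = THIS_MODULE;
 *      hdev->open     = my_open;       // open/close/destruct are mandatory
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;
 *      hdev->destruct = my_destruct;
 *
 *      err = hci_register_dev(hdev);   // returns the new device id, or < 0
 *      if (err < 0)
 *              hci_free_dev(hdev);
 *
 * Note that registration alone now triggers the automatic setup procedure
 * above: the adapter is powered on, hci_init_req() runs, and the adapter
 * is powered back off after AUTO_OFF_TIMEOUT unless something claims it. */
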
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                                int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

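/* Driver usage sketch (hypothetical): a transport driver that already
 * knows the packet type can feed arbitrarily sized chunks here and let
 * the core rebuild complete frames, e.g.:
 *
 *      hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * hci_recv_stream_fragment() below handles the case where the H:4 packet
 * type indicator byte is still embedded in the byte stream. */
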
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

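/* Only the head skb of the frag_list keeps the caller's ACL_START flag;
 * every continuation fragment is re-tagged ACL_CONT above, which is how
 * the controller reassembles the fragments into one L2CAP frame. */
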
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

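/* Quota example (sketch): with hdev->acl_cnt == 9 and three ACL
 * connections that all have queued data, the connection with the fewest
 * packets in flight is picked and granted 9 / 3 == 3 sends before the
 * scheduler looks for the next candidate; a connection is never starved
 * completely since the quote is rounded up to at least 1. */
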
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_L2CAP];
                if (hp && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_SCO];
                if (hp && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}
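
/* Flow-control note: cmd_cnt holds the controller's remaining command
 * credits. hci_cmd_task() spends one credit per command sent; the event
 * handlers (hci_event.c) refill it from the ncmd field of Command
 * Complete/Command Status events and reschedule this tasklet. */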