Bluetooth: Add signal handlers for channel moves
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
1da177e4
LT
/* Tasklet handlers, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the rx/tx/cmd tasklets against protocol (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
77
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
e041c683 82 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
83}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
e041c683 87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
88}
89
6516455d 90static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 91{
e041c683 92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
93}
94
95/* ---- HCI requests ---- */
96
23bb5763 97void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 98{
23bb5763
JH
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
a5040efa
JH
101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 105 return;
1da177e4
LT
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
8e87d142 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 127 unsigned long opt, __u32 timeout)
1da177e4
LT
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
e175072f 149 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
3ff50b79 159 }
1da177e4 160
a5040efa 161 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 169 unsigned long opt, __u32 timeout)
1da177e4
LT
170{
171 int ret;
172
7c6a329e
MH
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
1da177e4
LT
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
f630cf0d 189 set_bit(HCI_RESET, &hdev->flags);
a9de9248 190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
191}
192
193static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194{
b0916ea0 195 struct hci_cp_delete_stored_link_key cp;
1da177e4 196 struct sk_buff *skb;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4
LT
199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 207 skb->dev = (void *) hdev;
c78ae283 208
1da177e4 209 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 210 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
f630cf0d
GP
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 220 }
1da177e4
LT
221
222 /* Read Local Supported Features */
a9de9248 223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 224
1143e5a6 225 /* Read Local Version */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 227
1da177e4 228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4
LT
230
231#if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
aca3192c 235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
1da177e4 236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
aca3192c
YH
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
1da177e4
LT
240 }
241#endif
242
243 /* Read BD Address */
a9de9248
MH
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
251
252 /* Read Voice Setting */
a9de9248 253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
89f2783d 258 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 260
1da177e4 261 /* Connection accept timeout ~20 secs */
aca3192c 262 param = cpu_to_le16(0x7d00);
a9de9248 263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
268}
269
6ed58ec5
VT
270static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271{
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276}
277
1da177e4
LT
278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
a9de9248 285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
a9de9248 295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
e4e8e37c 304 /* Encryption */
a9de9248 305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
306}
307
e4e8e37c
MH
308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
a418b893 312 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
8e87d142 318/* Get HCI device by index.
1da177e4
LT
319 * Device is held on return. */
320struct hci_dev *hci_dev_get(int index)
321{
8035ded4 322 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
8035ded4 330 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338}
1da177e4
LT
339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
368void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 371 struct inquiry_entry *ie;
1da177e4
LT
372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
70f23020
AE
375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
1da177e4 377 /* Entry not in the cache. Add new one. */
70f23020
AE
378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
1da177e4 380 return;
70f23020
AE
381
382 ie->next = cache->list;
383 cache->list = ie;
1da177e4
LT
384 }
385
70f23020
AE
386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
1da177e4
LT
388 cache->timestamp = jiffies;
389}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
413static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414{
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
a9de9248 427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
428}
429
430int hci_inquiry(void __user *arg)
431{
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
5a08ecce
AE
442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
1da177e4
LT
444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
8e87d142 447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
04837f64 455 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
1da177e4
LT
462
463 /* for unlimited number of responses we will use buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467 * copy it to the user space.
468 */
01df8c31 469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 470 if (!buf) {
1da177e4
LT
471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
8e87d142 486 } else
1da177e4
LT
487 err = -EFAULT;
488
489 kfree(buf);
490
491done:
492 hci_dev_put(hdev);
493 return err;
494}
495
496/* ---- HCI ioctl helpers ---- */
497
498int hci_dev_open(__u16 dev)
499{
500 struct hci_dev *hdev;
501 int ret = 0;
502
5a08ecce
AE
503 hdev = hci_dev_get(dev);
504 if (!hdev)
1da177e4
LT
505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
611b30f7
MH
511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
1da177e4
LT
516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
943da25d
MH
524 /* Treat all non BR/EDR controllers as raw devices for now */
525 if (hdev->dev_type != HCI_BREDR)
526 set_bit(HCI_RAW, &hdev->flags);
527
1da177e4
LT
528 if (hdev->open(hdev)) {
529 ret = -EIO;
530 goto done;
531 }
532
533 if (!test_bit(HCI_RAW, &hdev->flags)) {
534 atomic_set(&hdev->cmd_cnt, 1);
535 set_bit(HCI_INIT, &hdev->flags);
a5040efa 536 hdev->init_last_cmd = 0;
1da177e4 537
04837f64
MH
538 ret = __hci_request(hdev, hci_init_req, 0,
539 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 540
eead27da 541 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
542 ret = __hci_request(hdev, hci_le_init_req, 0,
543 msecs_to_jiffies(HCI_INIT_TIMEOUT));
544
1da177e4
LT
545 clear_bit(HCI_INIT, &hdev->flags);
546 }
547
548 if (!ret) {
549 hci_dev_hold(hdev);
550 set_bit(HCI_UP, &hdev->flags);
551 hci_notify(hdev, HCI_DEV_UP);
5add6af8
JH
552 if (!test_bit(HCI_SETUP, &hdev->flags))
553 mgmt_powered(hdev->id, 1);
8e87d142 554 } else {
1da177e4
LT
555 /* Init failed, cleanup */
556 tasklet_kill(&hdev->rx_task);
557 tasklet_kill(&hdev->tx_task);
558 tasklet_kill(&hdev->cmd_task);
559
560 skb_queue_purge(&hdev->cmd_q);
561 skb_queue_purge(&hdev->rx_q);
562
563 if (hdev->flush)
564 hdev->flush(hdev);
565
566 if (hdev->sent_cmd) {
567 kfree_skb(hdev->sent_cmd);
568 hdev->sent_cmd = NULL;
569 }
570
571 hdev->close(hdev);
572 hdev->flags = 0;
573 }
574
575done:
576 hci_req_unlock(hdev);
577 hci_dev_put(hdev);
578 return ret;
579}
580
581static int hci_dev_do_close(struct hci_dev *hdev)
582{
583 BT_DBG("%s %p", hdev->name, hdev);
584
585 hci_req_cancel(hdev, ENODEV);
586 hci_req_lock(hdev);
587
588 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 589 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
590 hci_req_unlock(hdev);
591 return 0;
592 }
593
594 /* Kill RX and TX tasks */
595 tasklet_kill(&hdev->rx_task);
596 tasklet_kill(&hdev->tx_task);
597
598 hci_dev_lock_bh(hdev);
599 inquiry_cache_flush(hdev);
600 hci_conn_hash_flush(hdev);
601 hci_dev_unlock_bh(hdev);
602
603 hci_notify(hdev, HCI_DEV_DOWN);
604
605 if (hdev->flush)
606 hdev->flush(hdev);
607
608 /* Reset device */
609 skb_queue_purge(&hdev->cmd_q);
610 atomic_set(&hdev->cmd_cnt, 1);
611 if (!test_bit(HCI_RAW, &hdev->flags)) {
612 set_bit(HCI_INIT, &hdev->flags);
04837f64 613 __hci_request(hdev, hci_reset_req, 0,
43611a7b 614 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
615 clear_bit(HCI_INIT, &hdev->flags);
616 }
617
618 /* Kill cmd task */
619 tasklet_kill(&hdev->cmd_task);
620
621 /* Drop queues */
622 skb_queue_purge(&hdev->rx_q);
623 skb_queue_purge(&hdev->cmd_q);
624 skb_queue_purge(&hdev->raw_q);
625
626 /* Drop last sent command */
627 if (hdev->sent_cmd) {
b79f44c1 628 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
629 kfree_skb(hdev->sent_cmd);
630 hdev->sent_cmd = NULL;
631 }
632
633 /* After this point our queues are empty
634 * and no tasks are scheduled. */
635 hdev->close(hdev);
636
5add6af8
JH
637 mgmt_powered(hdev->id, 0);
638
1da177e4
LT
639 /* Clear flags */
640 hdev->flags = 0;
641
642 hci_req_unlock(hdev);
643
644 hci_dev_put(hdev);
645 return 0;
646}
647
648int hci_dev_close(__u16 dev)
649{
650 struct hci_dev *hdev;
651 int err;
652
70f23020
AE
653 hdev = hci_dev_get(dev);
654 if (!hdev)
1da177e4
LT
655 return -ENODEV;
656 err = hci_dev_do_close(hdev);
657 hci_dev_put(hdev);
658 return err;
659}
660
661int hci_dev_reset(__u16 dev)
662{
663 struct hci_dev *hdev;
664 int ret = 0;
665
70f23020
AE
666 hdev = hci_dev_get(dev);
667 if (!hdev)
1da177e4
LT
668 return -ENODEV;
669
670 hci_req_lock(hdev);
671 tasklet_disable(&hdev->tx_task);
672
673 if (!test_bit(HCI_UP, &hdev->flags))
674 goto done;
675
676 /* Drop queues */
677 skb_queue_purge(&hdev->rx_q);
678 skb_queue_purge(&hdev->cmd_q);
679
680 hci_dev_lock_bh(hdev);
681 inquiry_cache_flush(hdev);
682 hci_conn_hash_flush(hdev);
683 hci_dev_unlock_bh(hdev);
684
685 if (hdev->flush)
686 hdev->flush(hdev);
687
8e87d142 688 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 689 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
690
691 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
692 ret = __hci_request(hdev, hci_reset_req, 0,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
694
695done:
696 tasklet_enable(&hdev->tx_task);
697 hci_req_unlock(hdev);
698 hci_dev_put(hdev);
699 return ret;
700}
701
702int hci_dev_reset_stat(__u16 dev)
703{
704 struct hci_dev *hdev;
705 int ret = 0;
706
70f23020
AE
707 hdev = hci_dev_get(dev);
708 if (!hdev)
1da177e4
LT
709 return -ENODEV;
710
711 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
712
713 hci_dev_put(hdev);
714
715 return ret;
716}
717
718int hci_dev_cmd(unsigned int cmd, void __user *arg)
719{
720 struct hci_dev *hdev;
721 struct hci_dev_req dr;
722 int err = 0;
723
724 if (copy_from_user(&dr, arg, sizeof(dr)))
725 return -EFAULT;
726
70f23020
AE
727 hdev = hci_dev_get(dr.dev_id);
728 if (!hdev)
1da177e4
LT
729 return -ENODEV;
730
731 switch (cmd) {
732 case HCISETAUTH:
04837f64
MH
733 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
734 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
735 break;
736
737 case HCISETENCRYPT:
738 if (!lmp_encrypt_capable(hdev)) {
739 err = -EOPNOTSUPP;
740 break;
741 }
742
743 if (!test_bit(HCI_AUTH, &hdev->flags)) {
744 /* Auth must be enabled first */
04837f64
MH
745 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
746 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
747 if (err)
748 break;
749 }
750
04837f64
MH
751 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
752 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
753 break;
754
755 case HCISETSCAN:
04837f64
MH
756 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
757 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
758 break;
759
1da177e4 760 case HCISETLINKPOL:
e4e8e37c
MH
761 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
762 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
763 break;
764
765 case HCISETLINKMODE:
e4e8e37c
MH
766 hdev->link_mode = ((__u16) dr.dev_opt) &
767 (HCI_LM_MASTER | HCI_LM_ACCEPT);
768 break;
769
770 case HCISETPTYPE:
771 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
772 break;
773
774 case HCISETACLMTU:
e4e8e37c
MH
775 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
776 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
777 break;
778
779 case HCISETSCOMTU:
e4e8e37c
MH
780 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
781 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
782 break;
783
784 default:
785 err = -EINVAL;
786 break;
787 }
e4e8e37c 788
1da177e4
LT
789 hci_dev_put(hdev);
790 return err;
791}
792
793int hci_get_dev_list(void __user *arg)
794{
8035ded4 795 struct hci_dev *hdev;
1da177e4
LT
796 struct hci_dev_list_req *dl;
797 struct hci_dev_req *dr;
1da177e4
LT
798 int n = 0, size, err;
799 __u16 dev_num;
800
801 if (get_user(dev_num, (__u16 __user *) arg))
802 return -EFAULT;
803
804 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
805 return -EINVAL;
806
807 size = sizeof(*dl) + dev_num * sizeof(*dr);
808
70f23020
AE
809 dl = kzalloc(size, GFP_KERNEL);
810 if (!dl)
1da177e4
LT
811 return -ENOMEM;
812
813 dr = dl->dev_req;
814
815 read_lock_bh(&hci_dev_list_lock);
8035ded4 816 list_for_each_entry(hdev, &hci_dev_list, list) {
ab81cbf9 817 hci_del_off_timer(hdev);
c542a06c
JH
818
819 if (!test_bit(HCI_MGMT, &hdev->flags))
820 set_bit(HCI_PAIRABLE, &hdev->flags);
821
1da177e4
LT
822 (dr + n)->dev_id = hdev->id;
823 (dr + n)->dev_opt = hdev->flags;
c542a06c 824
1da177e4
LT
825 if (++n >= dev_num)
826 break;
827 }
828 read_unlock_bh(&hci_dev_list_lock);
829
830 dl->dev_num = n;
831 size = sizeof(*dl) + n * sizeof(*dr);
832
833 err = copy_to_user(arg, dl, size);
834 kfree(dl);
835
836 return err ? -EFAULT : 0;
837}
838
839int hci_get_dev_info(void __user *arg)
840{
841 struct hci_dev *hdev;
842 struct hci_dev_info di;
843 int err = 0;
844
845 if (copy_from_user(&di, arg, sizeof(di)))
846 return -EFAULT;
847
70f23020
AE
848 hdev = hci_dev_get(di.dev_id);
849 if (!hdev)
1da177e4
LT
850 return -ENODEV;
851
ab81cbf9
JH
852 hci_del_off_timer(hdev);
853
c542a06c
JH
854 if (!test_bit(HCI_MGMT, &hdev->flags))
855 set_bit(HCI_PAIRABLE, &hdev->flags);
856
1da177e4
LT
857 strcpy(di.name, hdev->name);
858 di.bdaddr = hdev->bdaddr;
943da25d 859 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
860 di.flags = hdev->flags;
861 di.pkt_type = hdev->pkt_type;
862 di.acl_mtu = hdev->acl_mtu;
863 di.acl_pkts = hdev->acl_pkts;
864 di.sco_mtu = hdev->sco_mtu;
865 di.sco_pkts = hdev->sco_pkts;
866 di.link_policy = hdev->link_policy;
867 di.link_mode = hdev->link_mode;
868
869 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
870 memcpy(&di.features, &hdev->features, sizeof(di.features));
871
872 if (copy_to_user(arg, &di, sizeof(di)))
873 err = -EFAULT;
874
875 hci_dev_put(hdev);
876
877 return err;
878}
879
880/* ---- Interface to HCI drivers ---- */
881
611b30f7
MH
882static int hci_rfkill_set_block(void *data, bool blocked)
883{
884 struct hci_dev *hdev = data;
885
886 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
887
888 if (!blocked)
889 return 0;
890
891 hci_dev_do_close(hdev);
892
893 return 0;
894}
895
896static const struct rfkill_ops hci_rfkill_ops = {
897 .set_block = hci_rfkill_set_block,
898};
899
1da177e4
LT
900/* Alloc HCI device */
901struct hci_dev *hci_alloc_dev(void)
902{
903 struct hci_dev *hdev;
904
25ea6db0 905 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
906 if (!hdev)
907 return NULL;
908
0ac7e700 909 hci_init_sysfs(hdev);
1da177e4
LT
910 skb_queue_head_init(&hdev->driver_init);
911
912 return hdev;
913}
914EXPORT_SYMBOL(hci_alloc_dev);
915
916/* Free HCI device */
917void hci_free_dev(struct hci_dev *hdev)
918{
919 skb_queue_purge(&hdev->driver_init);
920
a91f2e39
MH
921 /* will free via device release */
922 put_device(&hdev->dev);
1da177e4
LT
923}
924EXPORT_SYMBOL(hci_free_dev);
925
ab81cbf9
JH
926static void hci_power_on(struct work_struct *work)
927{
928 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
929
930 BT_DBG("%s", hdev->name);
931
932 if (hci_dev_open(hdev->id) < 0)
933 return;
934
935 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
936 mod_timer(&hdev->off_timer,
937 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
938
939 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
940 mgmt_index_added(hdev->id);
941}
942
943static void hci_power_off(struct work_struct *work)
944{
945 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
946
947 BT_DBG("%s", hdev->name);
948
949 hci_dev_close(hdev->id);
950}
951
952static void hci_auto_off(unsigned long data)
953{
954 struct hci_dev *hdev = (struct hci_dev *) data;
955
956 BT_DBG("%s", hdev->name);
957
958 clear_bit(HCI_AUTO_OFF, &hdev->flags);
959
960 queue_work(hdev->workqueue, &hdev->power_off);
961}
962
963void hci_del_off_timer(struct hci_dev *hdev)
964{
965 BT_DBG("%s", hdev->name);
966
967 clear_bit(HCI_AUTO_OFF, &hdev->flags);
968 del_timer(&hdev->off_timer);
969}
970
2aeb9a1a
JH
971int hci_uuids_clear(struct hci_dev *hdev)
972{
973 struct list_head *p, *n;
974
975 list_for_each_safe(p, n, &hdev->uuids) {
976 struct bt_uuid *uuid;
977
978 uuid = list_entry(p, struct bt_uuid, list);
979
980 list_del(p);
981 kfree(uuid);
982 }
983
984 return 0;
985}
986
55ed8ca1
JH
987int hci_link_keys_clear(struct hci_dev *hdev)
988{
989 struct list_head *p, *n;
990
991 list_for_each_safe(p, n, &hdev->link_keys) {
992 struct link_key *key;
993
994 key = list_entry(p, struct link_key, list);
995
996 list_del(p);
997 kfree(key);
998 }
999
1000 return 0;
1001}
1002
1003struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004{
8035ded4 1005 struct link_key *k;
55ed8ca1 1006
8035ded4 1007 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1008 if (bacmp(bdaddr, &k->bdaddr) == 0)
1009 return k;
55ed8ca1
JH
1010
1011 return NULL;
1012}
1013
d25e28ab
JH
1014static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1015 u8 key_type, u8 old_key_type)
1016{
1017 /* Legacy key */
1018 if (key_type < 0x03)
1019 return 1;
1020
1021 /* Debug keys are insecure so don't store them persistently */
1022 if (key_type == HCI_LK_DEBUG_COMBINATION)
1023 return 0;
1024
1025 /* Changed combination key and there's no previous one */
1026 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1027 return 0;
1028
1029 /* Security mode 3 case */
1030 if (!conn)
1031 return 1;
1032
1033 /* Neither local nor remote side had no-bonding as requirement */
1034 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1035 return 1;
1036
1037 /* Local side had dedicated bonding as requirement */
1038 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1039 return 1;
1040
1041 /* Remote side had dedicated bonding as requirement */
1042 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1043 return 1;
1044
1045 /* If none of the above criteria match, then don't store the key
1046 * persistently */
1047 return 0;
1048}
1049
75d262c2
VCG
1050struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1051{
1052 struct link_key *k;
1053
1054 list_for_each_entry(k, &hdev->link_keys, list) {
1055 struct key_master_id *id;
1056
1057 if (k->type != HCI_LK_SMP_LTK)
1058 continue;
1059
1060 if (k->dlen != sizeof(*id))
1061 continue;
1062
1063 id = (void *) &k->data;
1064 if (id->ediv == ediv &&
1065 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1066 return k;
1067 }
1068
1069 return NULL;
1070}
1071EXPORT_SYMBOL(hci_find_ltk);
1072
1073struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1074 bdaddr_t *bdaddr, u8 type)
1075{
1076 struct link_key *k;
1077
1078 list_for_each_entry(k, &hdev->link_keys, list)
1079 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1080 return k;
1081
1082 return NULL;
1083}
1084EXPORT_SYMBOL(hci_find_link_key_type);
1085
d25e28ab
JH
1086int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1087 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1088{
1089 struct link_key *key, *old_key;
4df378a1 1090 u8 old_key_type, persistent;
55ed8ca1
JH
1091
1092 old_key = hci_find_link_key(hdev, bdaddr);
1093 if (old_key) {
1094 old_key_type = old_key->type;
1095 key = old_key;
1096 } else {
12adcf3a 1097 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1098 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1099 if (!key)
1100 return -ENOMEM;
1101 list_add(&key->list, &hdev->link_keys);
1102 }
1103
1104 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1105
d25e28ab
JH
1106 /* Some buggy controller combinations generate a changed
1107 * combination key for legacy pairing even when there's no
1108 * previous key */
1109 if (type == HCI_LK_CHANGED_COMBINATION &&
1110 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1111 old_key_type == 0xff) {
d25e28ab 1112 type = HCI_LK_COMBINATION;
655fe6ec
JH
1113 if (conn)
1114 conn->key_type = type;
1115 }
d25e28ab 1116
55ed8ca1
JH
1117 bacpy(&key->bdaddr, bdaddr);
1118 memcpy(key->val, val, 16);
55ed8ca1
JH
1119 key->pin_len = pin_len;
1120
b6020ba0 1121 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1122 key->type = old_key_type;
4748fed2
JH
1123 else
1124 key->type = type;
1125
4df378a1
JH
1126 if (!new_key)
1127 return 0;
1128
1129 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1130
1131 mgmt_new_key(hdev->id, key, persistent);
1132
1133 if (!persistent) {
1134 list_del(&key->list);
1135 kfree(key);
1136 }
55ed8ca1
JH
1137
1138 return 0;
1139}
1140
75d262c2 1141int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1142 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1143{
1144 struct link_key *key, *old_key;
1145 struct key_master_id *id;
1146 u8 old_key_type;
1147
1148 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1149
1150 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1151 if (old_key) {
1152 key = old_key;
1153 old_key_type = old_key->type;
1154 } else {
1155 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1156 if (!key)
1157 return -ENOMEM;
1158 list_add(&key->list, &hdev->link_keys);
1159 old_key_type = 0xff;
1160 }
1161
1162 key->dlen = sizeof(*id);
1163
1164 bacpy(&key->bdaddr, bdaddr);
1165 memcpy(key->val, ltk, sizeof(key->val));
1166 key->type = HCI_LK_SMP_LTK;
726b4ffc 1167 key->pin_len = key_size;
75d262c2
VCG
1168
1169 id = (void *) &key->data;
1170 id->ediv = ediv;
1171 memcpy(id->rand, rand, sizeof(id->rand));
1172
1173 if (new_key)
1174 mgmt_new_key(hdev->id, key, old_key_type);
1175
1176 return 0;
1177}
1178
55ed8ca1
JH
1179int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180{
1181 struct link_key *key;
1182
1183 key = hci_find_link_key(hdev, bdaddr);
1184 if (!key)
1185 return -ENOENT;
1186
1187 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1188
1189 list_del(&key->list);
1190 kfree(key);
1191
1192 return 0;
1193}
1194
6bd32326
VT
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	/* The controller failed to acknowledge the last command in time.
	 * Force the command credit back to 1 so the next queued command
	 * can be sent instead of stalling the command queue forever. */
	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1204
2763eda6
SJ
1205struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1206 bdaddr_t *bdaddr)
1207{
1208 struct oob_data *data;
1209
1210 list_for_each_entry(data, &hdev->remote_oob_data, list)
1211 if (bacmp(bdaddr, &data->bdaddr) == 0)
1212 return data;
1213
1214 return NULL;
1215}
1216
1217int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218{
1219 struct oob_data *data;
1220
1221 data = hci_find_remote_oob_data(hdev, bdaddr);
1222 if (!data)
1223 return -ENOENT;
1224
1225 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227 list_del(&data->list);
1228 kfree(data);
1229
1230 return 0;
1231}
1232
1233int hci_remote_oob_data_clear(struct hci_dev *hdev)
1234{
1235 struct oob_data *data, *n;
1236
1237 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1238 list_del(&data->list);
1239 kfree(data);
1240 }
1241
1242 return 0;
1243}
1244
1245int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1246 u8 *randomizer)
1247{
1248 struct oob_data *data;
1249
1250 data = hci_find_remote_oob_data(hdev, bdaddr);
1251
1252 if (!data) {
1253 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1254 if (!data)
1255 return -ENOMEM;
1256
1257 bacpy(&data->bdaddr, bdaddr);
1258 list_add(&data->list, &hdev->remote_oob_data);
1259 }
1260
1261 memcpy(data->hash, hash, sizeof(data->hash));
1262 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1263
1264 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1265
1266 return 0;
1267}
1268
b2a66aad
AJ
1269struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1270 bdaddr_t *bdaddr)
1271{
8035ded4 1272 struct bdaddr_list *b;
b2a66aad 1273
8035ded4 1274 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1275 if (bacmp(bdaddr, &b->bdaddr) == 0)
1276 return b;
b2a66aad
AJ
1277
1278 return NULL;
1279}
1280
1281int hci_blacklist_clear(struct hci_dev *hdev)
1282{
1283 struct list_head *p, *n;
1284
1285 list_for_each_safe(p, n, &hdev->blacklist) {
1286 struct bdaddr_list *b;
1287
1288 b = list_entry(p, struct bdaddr_list, list);
1289
1290 list_del(p);
1291 kfree(b);
1292 }
1293
1294 return 0;
1295}
1296
1297int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298{
1299 struct bdaddr_list *entry;
b2a66aad
AJ
1300
1301 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1302 return -EBADF;
1303
5e762444
AJ
1304 if (hci_blacklist_lookup(hdev, bdaddr))
1305 return -EEXIST;
b2a66aad
AJ
1306
1307 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1308 if (!entry)
1309 return -ENOMEM;
b2a66aad
AJ
1310
1311 bacpy(&entry->bdaddr, bdaddr);
1312
1313 list_add(&entry->list, &hdev->blacklist);
1314
5e762444 1315 return mgmt_device_blocked(hdev->id, bdaddr);
b2a66aad
AJ
1316}
1317
1318int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319{
1320 struct bdaddr_list *entry;
b2a66aad 1321
a7925bd2 1322 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
5e762444 1323 return hci_blacklist_clear(hdev);
a7925bd2 1324 }
b2a66aad
AJ
1325
1326 entry = hci_blacklist_lookup(hdev, bdaddr);
a7925bd2 1327 if (!entry) {
5e762444 1328 return -ENOENT;
a7925bd2 1329 }
b2a66aad
AJ
1330
1331 list_del(&entry->list);
1332 kfree(entry);
1333
5e762444 1334 return mgmt_device_unblocked(hdev->id, bdaddr);
b2a66aad
AJ
1335}
1336
35815085
AG
/* Timer callback: flush the LE advertising cache once it expires. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1347
76c8686f
AG
1348int hci_adv_entries_clear(struct hci_dev *hdev)
1349{
1350 struct adv_entry *entry, *tmp;
1351
1352 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1353 list_del(&entry->list);
1354 kfree(entry);
1355 }
1356
1357 BT_DBG("%s adv cache cleared", hdev->name);
1358
1359 return 0;
1360}
1361
1362struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363{
1364 struct adv_entry *entry;
1365
1366 list_for_each_entry(entry, &hdev->adv_entries, list)
1367 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1368 return entry;
1369
1370 return NULL;
1371}
1372
1373static inline int is_connectable_adv(u8 evt_type)
1374{
1375 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1376 return 1;
1377
1378 return 0;
1379}
1380
/* Cache the address from a connectable LE advertising report so a later
 * connection attempt knows which address type to use.  Returns 0 on
 * success (including the already-cached case), -EINVAL for
 * non-connectable events, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1408
1da177e4
LT
/* Register HCI device */
/* Allocates the lowest free hciX id, initialises all per-device state
 * (queues, tasklets, timers, lists), creates sysfs/rfkill entries and
 * schedules the initial power-on.  Returns the new device id, or a
 * negative errno on failure. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	/* hci_dev_list is kept sorted by id, so the first gap wins */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Let the power_on work bring the device up; HCI_SETUP marks it
	 * as not yet announced to the management interface */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above under the same lock */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1526
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up, in reverse:
 * unlink from the global list, close the device, free pending
 * reassembly buffers, remove sysfs/rfkill, stop timers, flush the
 * workqueue and free all per-device caches, then drop the initial
 * reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a no-op, so unused slots are fine */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the device was ever announced */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1572
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Just broadcast the event; interested parties do the real work */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1580
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Just broadcast the event; interested parties do the real work */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1588
/* Receive frame from HCI drivers */
/* Takes ownership of skb; frames are dropped (-ENXIO) unless the
 * device is up or still initializing.  Accepted frames are queued for
 * the RX tasklet. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1612
33e882a5 1613static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1614 int count, __u8 index)
33e882a5
SS
1615{
1616 int len = 0;
1617 int hlen = 0;
1618 int remain = count;
1619 struct sk_buff *skb;
1620 struct bt_skb_cb *scb;
1621
1622 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1623 index >= NUM_REASSEMBLY)
1624 return -EILSEQ;
1625
1626 skb = hdev->reassembly[index];
1627
1628 if (!skb) {
1629 switch (type) {
1630 case HCI_ACLDATA_PKT:
1631 len = HCI_MAX_FRAME_SIZE;
1632 hlen = HCI_ACL_HDR_SIZE;
1633 break;
1634 case HCI_EVENT_PKT:
1635 len = HCI_MAX_EVENT_SIZE;
1636 hlen = HCI_EVENT_HDR_SIZE;
1637 break;
1638 case HCI_SCODATA_PKT:
1639 len = HCI_MAX_SCO_SIZE;
1640 hlen = HCI_SCO_HDR_SIZE;
1641 break;
1642 }
1643
1e429f38 1644 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1645 if (!skb)
1646 return -ENOMEM;
1647
1648 scb = (void *) skb->cb;
1649 scb->expect = hlen;
1650 scb->pkt_type = type;
1651
1652 skb->dev = (void *) hdev;
1653 hdev->reassembly[index] = skb;
1654 }
1655
1656 while (count) {
1657 scb = (void *) skb->cb;
1658 len = min(scb->expect, (__u16)count);
1659
1660 memcpy(skb_put(skb, len), data, len);
1661
1662 count -= len;
1663 data += len;
1664 scb->expect -= len;
1665 remain = count;
1666
1667 switch (type) {
1668 case HCI_EVENT_PKT:
1669 if (skb->len == HCI_EVENT_HDR_SIZE) {
1670 struct hci_event_hdr *h = hci_event_hdr(skb);
1671 scb->expect = h->plen;
1672
1673 if (skb_tailroom(skb) < scb->expect) {
1674 kfree_skb(skb);
1675 hdev->reassembly[index] = NULL;
1676 return -ENOMEM;
1677 }
1678 }
1679 break;
1680
1681 case HCI_ACLDATA_PKT:
1682 if (skb->len == HCI_ACL_HDR_SIZE) {
1683 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1684 scb->expect = __le16_to_cpu(h->dlen);
1685
1686 if (skb_tailroom(skb) < scb->expect) {
1687 kfree_skb(skb);
1688 hdev->reassembly[index] = NULL;
1689 return -ENOMEM;
1690 }
1691 }
1692 break;
1693
1694 case HCI_SCODATA_PKT:
1695 if (skb->len == HCI_SCO_HDR_SIZE) {
1696 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1697 scb->expect = h->dlen;
1698
1699 if (skb_tailroom(skb) < scb->expect) {
1700 kfree_skb(skb);
1701 hdev->reassembly[index] = NULL;
1702 return -ENOMEM;
1703 }
1704 }
1705 break;
1706 }
1707
1708 if (scb->expect == 0) {
1709 /* Complete frame */
1710
1711 bt_cb(skb)->pkt_type = type;
1712 hci_recv_frame(skb);
1713
1714 hdev->reassembly[index] = NULL;
1715 return remain;
1716 }
1717 }
1718
1719 return remain;
1720}
1721
/* Feed a buffer of fragments of a single, driver-supplied packet type
 * into the per-type reassembly slot (slot index = type - 1).  Returns
 * the number of unconsumed bytes or a negative error. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past whatever hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1741
#define STREAM_REASSEMBLY 0

/* Reassemble from a raw byte stream where each packet is preceded by a
 * one-byte packet-type indicator (H4-style transports).  Returns the
 * number of unconsumed bytes or a negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
						STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1776
1da177e4
LT
1777/* ---- Interface to upper protocols ---- */
1778
1779/* Register/Unregister protocols.
1780 * hci_task_lock is used to ensure that no tasks are running. */
1781int hci_register_proto(struct hci_proto *hp)
1782{
1783 int err = 0;
1784
1785 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1786
1787 if (hp->id >= HCI_MAX_PROTO)
1788 return -EINVAL;
1789
1790 write_lock_bh(&hci_task_lock);
1791
1792 if (!hci_proto[hp->id])
1793 hci_proto[hp->id] = hp;
1794 else
1795 err = -EEXIST;
1796
1797 write_unlock_bh(&hci_task_lock);
1798
1799 return err;
1800}
1801EXPORT_SYMBOL(hci_register_proto);
1802
1803int hci_unregister_proto(struct hci_proto *hp)
1804{
1805 int err = 0;
1806
1807 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1808
1809 if (hp->id >= HCI_MAX_PROTO)
1810 return -EINVAL;
1811
1812 write_lock_bh(&hci_task_lock);
1813
1814 if (hci_proto[hp->id])
1815 hci_proto[hp->id] = NULL;
1816 else
1817 err = -ENOENT;
1818
1819 write_unlock_bh(&hci_task_lock);
1820
1821 return err;
1822}
1823EXPORT_SYMBOL(hci_unregister_proto);
1824
/* Register a HCI event callback; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1836
/* Remove a previously registered HCI event callback. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1848
/* Hand one frame to the driver; promiscuous sockets see a copy first.
 * Takes ownership of skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1872
/* Send HCI command */
/* Builds a command packet (header + plen bytes of param) and queues it
 * on cmd_q for the command tasklet.  Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command sent during init so the init
	 * sequence can be resumed after a command-status/complete */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1da177e4
LT
1908
1909/* Get data from the previously sent command */
a9de9248 1910void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1911{
1912 struct hci_command_hdr *hdr;
1913
1914 if (!hdev->sent_cmd)
1915 return NULL;
1916
1917 hdr = (void *) hdev->sent_cmd->data;
1918
a9de9248 1919 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1920 return NULL;
1921
a9de9248 1922 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1923
1924 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1925}
1926
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, payload length) to skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push below */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1939
/* Queue an ACL skb (and any frag_list continuation fragments) on the
 * given channel queue.  The first fragment already carries its header;
 * continuation fragments get ACL_CONT headers here. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
1980
/* Queue ACL data on a channel and kick the TX tasklet. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1997
/* Send SCO data */
/* Prepend a SCO header and queue the frame for the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2020
2021/* ---- HCI TX task (outgoing data) ---- */
2022
/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (simple fairness), and compute its quota of
 * controller buffers (*quote).  Returns NULL / *quote = 0 when nothing
 * is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the available credits evenly, minimum one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2077
bae1f5d9 2078static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2079{
2080 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2081 struct hci_conn *c;
1da177e4 2082
bae1f5d9 2083 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
2084
2085 /* Kill stalled connections */
8035ded4 2086 list_for_each_entry(c, &h->list, list) {
bae1f5d9
VT
2087 if (c->type == type && c->sent) {
2088 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2089 hdev->name, batostr(&c->dst));
2090 hci_acl_disconn(c, 0x13);
2091 }
2092 }
2093}
2094
/* Channel-level scheduler: among all channels of the given link type
 * with queued data, consider only those whose head skb has the highest
 * priority, and of those pick the one on the least-sent connection.
 * *quote receives that channel's share of controller buffers. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers: share ACL credits */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2172
/* Anti-starvation pass: after a scheduling round, promote the head skb
 * of every idle channel (nothing sent this round) to just below the
 * maximum priority so it gets considered next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
				skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2219
/* Drain queued ACL data within the controller's buffer credits, with a
 * stalled-link watchdog and priority-based channel scheduling. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2269
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* SCO has no per-packet flow control; just wrap
			 * the sent counter instead of letting it overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2293
/* Schedule eSCO: identical policy to hci_sched_sco() but for the
 * ESCO_LINK connection type. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap rather than overflow the sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2316
/* Drain queued LE data.  Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) borrow ACL credits. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they
	 * came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2367
/* TX tasklet: run every per-type scheduler, then flush raw packets.
 * hci_task_lock (read) keeps proto registration stable meanwhile. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2394
25985edc 2395/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2396
/* ACL data packet */
/* Deliver an inbound ACL frame to the owning connection's upper protocol
 * (L2CAP).  On successful handoff the skb is consumed by the protocol;
 * on any failure path (unknown handle, no L2CAP handler) it is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* Strip the ACL header; skb->data now points at the payload. */
	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The wire "handle" field packs the connection handle and the
	 * packet-boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only needs the lock; conn itself stays valid afterwards
	 * under the connection's own refcounting. */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Traffic counts as activity: kick the link out of sniff/park. */
		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Ownership of skb transfers to L2CAP here. */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* Not delivered: drop the frame. */
	kfree_skb(skb);
}

/* SCO data packet */
/* Deliver an inbound SCO (voice) frame to the SCO protocol layer.
 * Mirrors hci_acldata_packet(): the skb is consumed on successful handoff,
 * freed here otherwise.  SCO handles carry no flag bits, so no masking. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	/* Strip the SCO header; payload follows. */
	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Ownership of skb transfers to the SCO layer here. */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* Not delivered: drop the frame. */
	kfree_skb(skb);
}
2472
/* RX tasklet: drain the device receive queue and dispatch each frame by
 * packet type.  Frames handed to a packet handler are consumed by it;
 * everything filtered out here is freed locally. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets (monitor/raw listeners). */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In RAW mode user space owns the HCI traffic; the stack
		 * itself processes nothing. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state; events
			 * still fall through so init commands can complete. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2527
/* CMD tasklet: transmit at most one queued HCI command when the controller
 * has credit (cmd_cnt > 0).  A clone is kept in hdev->sent_cmd so the
 * command-complete handler can inspect what was sent, and cmd_timer is
 * armed to detect a controller that never responds. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd reference before replacing it
		 * (kfree_skb(NULL) is a no-op on the first command). */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one credit and hand the skb to the driver;
			 * hci_send_frame takes ownership of skb. */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During a reset there is no point timing out the
			 * command; otherwise (re)arm the watchdog. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (OOM): put the command back at the head
			 * of the queue and retry from the tasklet later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}