Bluetooth: Move EIR and CoD update functions to a better position
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
/* Milliseconds before an auto-powered-on controller is switched back
 * off if userspace has not claimed it. */
#define AUTO_OFF_TIMEOUT	2000

/* Module parameter: when set, non-BR/EDR (high speed / AMP) controllers
 * are exposed as full HCI devices instead of raw ones. */
int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

static DEFINE_MUTEX(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
80
81/* ---- HCI notifications ---- */
82
83int hci_register_notifier(struct notifier_block *nb)
84{
e041c683 85 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
86}
87
88int hci_unregister_notifier(struct notifier_block *nb)
89{
e041c683 90 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
91}
92
6516455d 93static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 94{
e041c683 95 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
96}
97
98/* ---- HCI requests ---- */
99
23bb5763 100void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 101{
23bb5763
JH
102 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
a5040efa
JH
104 /* If this is the init phase check if the completed command matches
105 * the last init command, and if not just return.
106 */
107 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 108 return;
1da177e4
LT
109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = result;
112 hdev->req_status = HCI_REQ_DONE;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117static void hci_req_cancel(struct hci_dev *hdev, int err)
118{
119 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
125 }
126}
127
128/* Execute request and wait for completion. */
8e87d142 129static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 130 unsigned long opt, __u32 timeout)
1da177e4
LT
131{
132 DECLARE_WAITQUEUE(wait, current);
133 int err = 0;
134
135 BT_DBG("%s start", hdev->name);
136
137 hdev->req_status = HCI_REQ_PEND;
138
139 add_wait_queue(&hdev->req_wait_q, &wait);
140 set_current_state(TASK_INTERRUPTIBLE);
141
142 req(hdev, opt);
143 schedule_timeout(timeout);
144
145 remove_wait_queue(&hdev->req_wait_q, &wait);
146
147 if (signal_pending(current))
148 return -EINTR;
149
150 switch (hdev->req_status) {
151 case HCI_REQ_DONE:
e175072f 152 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
153 break;
154
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
157 break;
158
159 default:
160 err = -ETIMEDOUT;
161 break;
3ff50b79 162 }
1da177e4 163
a5040efa 164 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
165
166 BT_DBG("%s end: err %d", hdev->name, err);
167
168 return err;
169}
170
171static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 172 unsigned long opt, __u32 timeout)
1da177e4
LT
173{
174 int ret;
175
7c6a329e
MH
176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
1da177e4
LT
179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
187static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188{
189 BT_DBG("%s %ld", hdev->name, opt);
190
191 /* Reset device */
f630cf0d 192 set_bit(HCI_RESET, &hdev->flags);
a9de9248 193 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
194}
195
196static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
197{
b0916ea0 198 struct hci_cp_delete_stored_link_key cp;
1da177e4 199 struct sk_buff *skb;
1ebb9252 200 __le16 param;
89f2783d 201 __u8 flt_type;
1da177e4
LT
202
203 BT_DBG("%s %ld", hdev->name, opt);
204
205 /* Driver initialization */
206
207 /* Special commands */
208 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 209 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 210 skb->dev = (void *) hdev;
c78ae283 211
1da177e4 212 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 213 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
214 }
215 skb_queue_purge(&hdev->driver_init);
216
217 /* Mandatory initialization */
218
219 /* Reset */
f630cf0d
GP
220 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
221 set_bit(HCI_RESET, &hdev->flags);
a9de9248 222 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 223 }
1da177e4
LT
224
225 /* Read Local Supported Features */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 227
1143e5a6 228 /* Read Local Version */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 230
1da177e4 231 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 232 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 233
1da177e4 234 /* Read BD Address */
a9de9248
MH
235 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
236
237 /* Read Class of Device */
238 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
239
240 /* Read Local Name */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
242
243 /* Read Voice Setting */
a9de9248 244 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
245
246 /* Optional initialization */
247
248 /* Clear Event Filters */
89f2783d 249 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 250 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 251
1da177e4 252 /* Connection accept timeout ~20 secs */
aca3192c 253 param = cpu_to_le16(0x7d00);
a9de9248 254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
255
256 bacpy(&cp.bdaddr, BDADDR_ANY);
257 cp.delete_all = 1;
258 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
259}
260
6ed58ec5
VT
261static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
262{
263 BT_DBG("%s", hdev->name);
264
265 /* Read LE buffer size */
266 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
267}
268
1da177e4
LT
269static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
270{
271 __u8 scan = opt;
272
273 BT_DBG("%s %x", hdev->name, scan);
274
275 /* Inquiry and Page scans */
a9de9248 276 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
277}
278
279static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 auth = opt;
282
283 BT_DBG("%s %x", hdev->name, auth);
284
285 /* Authentication */
a9de9248 286 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
287}
288
289static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 encrypt = opt;
292
293 BT_DBG("%s %x", hdev->name, encrypt);
294
e4e8e37c 295 /* Encryption */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
297}
298
e4e8e37c
MH
299static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __le16 policy = cpu_to_le16(opt);
302
a418b893 303 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
304
305 /* Default link policy */
306 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
307}
308
8e87d142 309/* Get HCI device by index.
1da177e4
LT
310 * Device is held on return. */
311struct hci_dev *hci_dev_get(int index)
312{
8035ded4 313 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
314
315 BT_DBG("%d", index);
316
317 if (index < 0)
318 return NULL;
319
320 read_lock(&hci_dev_list_lock);
8035ded4 321 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
322 if (d->id == index) {
323 hdev = hci_dev_hold(d);
324 break;
325 }
326 }
327 read_unlock(&hci_dev_list_lock);
328 return hdev;
329}
1da177e4
LT
330
331/* ---- Inquiry support ---- */
332static void inquiry_cache_flush(struct hci_dev *hdev)
333{
334 struct inquiry_cache *cache = &hdev->inq_cache;
335 struct inquiry_entry *next = cache->list, *e;
336
337 BT_DBG("cache %p", cache);
338
339 cache->list = NULL;
340 while ((e = next)) {
341 next = e->next;
342 kfree(e);
343 }
344}
345
346struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
347{
348 struct inquiry_cache *cache = &hdev->inq_cache;
349 struct inquiry_entry *e;
350
351 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
352
353 for (e = cache->list; e; e = e->next)
354 if (!bacmp(&e->data.bdaddr, bdaddr))
355 break;
356 return e;
357}
358
359void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
360{
361 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 362 struct inquiry_entry *ie;
1da177e4
LT
363
364 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
365
70f23020
AE
366 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
367 if (!ie) {
1da177e4 368 /* Entry not in the cache. Add new one. */
70f23020
AE
369 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
370 if (!ie)
1da177e4 371 return;
70f23020
AE
372
373 ie->next = cache->list;
374 cache->list = ie;
1da177e4
LT
375 }
376
70f23020
AE
377 memcpy(&ie->data, data, sizeof(*data));
378 ie->timestamp = jiffies;
1da177e4
LT
379 cache->timestamp = jiffies;
380}
381
382static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
383{
384 struct inquiry_cache *cache = &hdev->inq_cache;
385 struct inquiry_info *info = (struct inquiry_info *) buf;
386 struct inquiry_entry *e;
387 int copied = 0;
388
389 for (e = cache->list; e && copied < num; e = e->next, copied++) {
390 struct inquiry_data *data = &e->data;
391 bacpy(&info->bdaddr, &data->bdaddr);
392 info->pscan_rep_mode = data->pscan_rep_mode;
393 info->pscan_period_mode = data->pscan_period_mode;
394 info->pscan_mode = data->pscan_mode;
395 memcpy(info->dev_class, data->dev_class, 3);
396 info->clock_offset = data->clock_offset;
397 info++;
398 }
399
400 BT_DBG("cache %p, copied %d", cache, copied);
401 return copied;
402}
403
404static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
405{
406 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
407 struct hci_cp_inquiry cp;
408
409 BT_DBG("%s", hdev->name);
410
411 if (test_bit(HCI_INQUIRY, &hdev->flags))
412 return;
413
414 /* Start Inquiry */
415 memcpy(&cp.lap, &ir->lap, 3);
416 cp.length = ir->length;
417 cp.num_rsp = ir->num_rsp;
a9de9248 418 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
419}
420
421int hci_inquiry(void __user *arg)
422{
423 __u8 __user *ptr = arg;
424 struct hci_inquiry_req ir;
425 struct hci_dev *hdev;
426 int err = 0, do_inquiry = 0, max_rsp;
427 long timeo;
428 __u8 *buf;
429
430 if (copy_from_user(&ir, ptr, sizeof(ir)))
431 return -EFAULT;
432
5a08ecce
AE
433 hdev = hci_dev_get(ir.dev_id);
434 if (!hdev)
1da177e4
LT
435 return -ENODEV;
436
09fd0de5 437 hci_dev_lock(hdev);
8e87d142 438 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
439 inquiry_cache_empty(hdev) ||
440 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
441 inquiry_cache_flush(hdev);
442 do_inquiry = 1;
443 }
09fd0de5 444 hci_dev_unlock(hdev);
1da177e4 445
04837f64 446 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
447
448 if (do_inquiry) {
449 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
450 if (err < 0)
451 goto done;
452 }
1da177e4
LT
453
454 /* for unlimited number of responses we will use buffer with 255 entries */
455 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
456
457 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
458 * copy it to the user space.
459 */
01df8c31 460 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 461 if (!buf) {
1da177e4
LT
462 err = -ENOMEM;
463 goto done;
464 }
465
09fd0de5 466 hci_dev_lock(hdev);
1da177e4 467 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 468 hci_dev_unlock(hdev);
1da177e4
LT
469
470 BT_DBG("num_rsp %d", ir.num_rsp);
471
472 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
473 ptr += sizeof(ir);
474 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
475 ir.num_rsp))
476 err = -EFAULT;
8e87d142 477 } else
1da177e4
LT
478 err = -EFAULT;
479
480 kfree(buf);
481
482done:
483 hci_dev_put(hdev);
484 return err;
485}
486
487/* ---- HCI ioctl helpers ---- */
488
489int hci_dev_open(__u16 dev)
490{
491 struct hci_dev *hdev;
492 int ret = 0;
493
5a08ecce
AE
494 hdev = hci_dev_get(dev);
495 if (!hdev)
1da177e4
LT
496 return -ENODEV;
497
498 BT_DBG("%s %p", hdev->name, hdev);
499
500 hci_req_lock(hdev);
501
611b30f7
MH
502 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
503 ret = -ERFKILL;
504 goto done;
505 }
506
1da177e4
LT
507 if (test_bit(HCI_UP, &hdev->flags)) {
508 ret = -EALREADY;
509 goto done;
510 }
511
512 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
513 set_bit(HCI_RAW, &hdev->flags);
514
07e3b94a
AE
515 /* Treat all non BR/EDR controllers as raw devices if
516 enable_hs is not set */
517 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
518 set_bit(HCI_RAW, &hdev->flags);
519
1da177e4
LT
520 if (hdev->open(hdev)) {
521 ret = -EIO;
522 goto done;
523 }
524
525 if (!test_bit(HCI_RAW, &hdev->flags)) {
526 atomic_set(&hdev->cmd_cnt, 1);
527 set_bit(HCI_INIT, &hdev->flags);
a5040efa 528 hdev->init_last_cmd = 0;
1da177e4 529
04837f64
MH
530 ret = __hci_request(hdev, hci_init_req, 0,
531 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 532
eead27da 533 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
534 ret = __hci_request(hdev, hci_le_init_req, 0,
535 msecs_to_jiffies(HCI_INIT_TIMEOUT));
536
1da177e4
LT
537 clear_bit(HCI_INIT, &hdev->flags);
538 }
539
540 if (!ret) {
541 hci_dev_hold(hdev);
542 set_bit(HCI_UP, &hdev->flags);
543 hci_notify(hdev, HCI_DEV_UP);
56e5cb86 544 if (!test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 545 hci_dev_lock(hdev);
744cf19e 546 mgmt_powered(hdev, 1);
09fd0de5 547 hci_dev_unlock(hdev);
56e5cb86 548 }
8e87d142 549 } else {
1da177e4 550 /* Init failed, cleanup */
3eff45ea 551 flush_work(&hdev->tx_work);
c347b765 552 flush_work(&hdev->cmd_work);
b78752cc 553 flush_work(&hdev->rx_work);
1da177e4
LT
554
555 skb_queue_purge(&hdev->cmd_q);
556 skb_queue_purge(&hdev->rx_q);
557
558 if (hdev->flush)
559 hdev->flush(hdev);
560
561 if (hdev->sent_cmd) {
562 kfree_skb(hdev->sent_cmd);
563 hdev->sent_cmd = NULL;
564 }
565
566 hdev->close(hdev);
567 hdev->flags = 0;
568 }
569
570done:
571 hci_req_unlock(hdev);
572 hci_dev_put(hdev);
573 return ret;
574}
575
576static int hci_dev_do_close(struct hci_dev *hdev)
577{
578 BT_DBG("%s %p", hdev->name, hdev);
579
580 hci_req_cancel(hdev, ENODEV);
581 hci_req_lock(hdev);
582
583 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 584 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
585 hci_req_unlock(hdev);
586 return 0;
587 }
588
3eff45ea
GP
589 /* Flush RX and TX works */
590 flush_work(&hdev->tx_work);
b78752cc 591 flush_work(&hdev->rx_work);
1da177e4 592
16ab91ab 593 if (hdev->discov_timeout > 0) {
e0f9309f 594 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
595 hdev->discov_timeout = 0;
596 }
597
3243553f 598 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 599 cancel_delayed_work(&hdev->power_off);
3243553f 600
09fd0de5 601 hci_dev_lock(hdev);
1da177e4
LT
602 inquiry_cache_flush(hdev);
603 hci_conn_hash_flush(hdev);
09fd0de5 604 hci_dev_unlock(hdev);
1da177e4
LT
605
606 hci_notify(hdev, HCI_DEV_DOWN);
607
608 if (hdev->flush)
609 hdev->flush(hdev);
610
611 /* Reset device */
612 skb_queue_purge(&hdev->cmd_q);
613 atomic_set(&hdev->cmd_cnt, 1);
614 if (!test_bit(HCI_RAW, &hdev->flags)) {
615 set_bit(HCI_INIT, &hdev->flags);
04837f64 616 __hci_request(hdev, hci_reset_req, 0,
43611a7b 617 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
618 clear_bit(HCI_INIT, &hdev->flags);
619 }
620
c347b765
GP
621 /* flush cmd work */
622 flush_work(&hdev->cmd_work);
1da177e4
LT
623
624 /* Drop queues */
625 skb_queue_purge(&hdev->rx_q);
626 skb_queue_purge(&hdev->cmd_q);
627 skb_queue_purge(&hdev->raw_q);
628
629 /* Drop last sent command */
630 if (hdev->sent_cmd) {
b79f44c1 631 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
632 kfree_skb(hdev->sent_cmd);
633 hdev->sent_cmd = NULL;
634 }
635
636 /* After this point our queues are empty
637 * and no tasks are scheduled. */
638 hdev->close(hdev);
639
09fd0de5 640 hci_dev_lock(hdev);
744cf19e 641 mgmt_powered(hdev, 0);
09fd0de5 642 hci_dev_unlock(hdev);
5add6af8 643
1da177e4
LT
644 /* Clear flags */
645 hdev->flags = 0;
646
647 hci_req_unlock(hdev);
648
649 hci_dev_put(hdev);
650 return 0;
651}
652
653int hci_dev_close(__u16 dev)
654{
655 struct hci_dev *hdev;
656 int err;
657
70f23020
AE
658 hdev = hci_dev_get(dev);
659 if (!hdev)
1da177e4
LT
660 return -ENODEV;
661 err = hci_dev_do_close(hdev);
662 hci_dev_put(hdev);
663 return err;
664}
665
666int hci_dev_reset(__u16 dev)
667{
668 struct hci_dev *hdev;
669 int ret = 0;
670
70f23020
AE
671 hdev = hci_dev_get(dev);
672 if (!hdev)
1da177e4
LT
673 return -ENODEV;
674
675 hci_req_lock(hdev);
1da177e4
LT
676
677 if (!test_bit(HCI_UP, &hdev->flags))
678 goto done;
679
680 /* Drop queues */
681 skb_queue_purge(&hdev->rx_q);
682 skb_queue_purge(&hdev->cmd_q);
683
09fd0de5 684 hci_dev_lock(hdev);
1da177e4
LT
685 inquiry_cache_flush(hdev);
686 hci_conn_hash_flush(hdev);
09fd0de5 687 hci_dev_unlock(hdev);
1da177e4
LT
688
689 if (hdev->flush)
690 hdev->flush(hdev);
691
8e87d142 692 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 693 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
694
695 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
696 ret = __hci_request(hdev, hci_reset_req, 0,
697 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
698
699done:
1da177e4
LT
700 hci_req_unlock(hdev);
701 hci_dev_put(hdev);
702 return ret;
703}
704
705int hci_dev_reset_stat(__u16 dev)
706{
707 struct hci_dev *hdev;
708 int ret = 0;
709
70f23020
AE
710 hdev = hci_dev_get(dev);
711 if (!hdev)
1da177e4
LT
712 return -ENODEV;
713
714 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
715
716 hci_dev_put(hdev);
717
718 return ret;
719}
720
721int hci_dev_cmd(unsigned int cmd, void __user *arg)
722{
723 struct hci_dev *hdev;
724 struct hci_dev_req dr;
725 int err = 0;
726
727 if (copy_from_user(&dr, arg, sizeof(dr)))
728 return -EFAULT;
729
70f23020
AE
730 hdev = hci_dev_get(dr.dev_id);
731 if (!hdev)
1da177e4
LT
732 return -ENODEV;
733
734 switch (cmd) {
735 case HCISETAUTH:
04837f64
MH
736 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
737 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
738 break;
739
740 case HCISETENCRYPT:
741 if (!lmp_encrypt_capable(hdev)) {
742 err = -EOPNOTSUPP;
743 break;
744 }
745
746 if (!test_bit(HCI_AUTH, &hdev->flags)) {
747 /* Auth must be enabled first */
04837f64
MH
748 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
749 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
750 if (err)
751 break;
752 }
753
04837f64
MH
754 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
755 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
756 break;
757
758 case HCISETSCAN:
04837f64
MH
759 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
761 break;
762
1da177e4 763 case HCISETLINKPOL:
e4e8e37c
MH
764 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
765 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
766 break;
767
768 case HCISETLINKMODE:
e4e8e37c
MH
769 hdev->link_mode = ((__u16) dr.dev_opt) &
770 (HCI_LM_MASTER | HCI_LM_ACCEPT);
771 break;
772
773 case HCISETPTYPE:
774 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
775 break;
776
777 case HCISETACLMTU:
e4e8e37c
MH
778 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
779 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
780 break;
781
782 case HCISETSCOMTU:
e4e8e37c
MH
783 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
784 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
785 break;
786
787 default:
788 err = -EINVAL;
789 break;
790 }
e4e8e37c 791
1da177e4
LT
792 hci_dev_put(hdev);
793 return err;
794}
795
796int hci_get_dev_list(void __user *arg)
797{
8035ded4 798 struct hci_dev *hdev;
1da177e4
LT
799 struct hci_dev_list_req *dl;
800 struct hci_dev_req *dr;
1da177e4
LT
801 int n = 0, size, err;
802 __u16 dev_num;
803
804 if (get_user(dev_num, (__u16 __user *) arg))
805 return -EFAULT;
806
807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
808 return -EINVAL;
809
810 size = sizeof(*dl) + dev_num * sizeof(*dr);
811
70f23020
AE
812 dl = kzalloc(size, GFP_KERNEL);
813 if (!dl)
1da177e4
LT
814 return -ENOMEM;
815
816 dr = dl->dev_req;
817
818 read_lock_bh(&hci_dev_list_lock);
8035ded4 819 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 820 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 821 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
822
823 if (!test_bit(HCI_MGMT, &hdev->flags))
824 set_bit(HCI_PAIRABLE, &hdev->flags);
825
1da177e4
LT
826 (dr + n)->dev_id = hdev->id;
827 (dr + n)->dev_opt = hdev->flags;
c542a06c 828
1da177e4
LT
829 if (++n >= dev_num)
830 break;
831 }
832 read_unlock_bh(&hci_dev_list_lock);
833
834 dl->dev_num = n;
835 size = sizeof(*dl) + n * sizeof(*dr);
836
837 err = copy_to_user(arg, dl, size);
838 kfree(dl);
839
840 return err ? -EFAULT : 0;
841}
842
843int hci_get_dev_info(void __user *arg)
844{
845 struct hci_dev *hdev;
846 struct hci_dev_info di;
847 int err = 0;
848
849 if (copy_from_user(&di, arg, sizeof(di)))
850 return -EFAULT;
851
70f23020
AE
852 hdev = hci_dev_get(di.dev_id);
853 if (!hdev)
1da177e4
LT
854 return -ENODEV;
855
3243553f
JH
856 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
857 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 858
c542a06c
JH
859 if (!test_bit(HCI_MGMT, &hdev->flags))
860 set_bit(HCI_PAIRABLE, &hdev->flags);
861
1da177e4
LT
862 strcpy(di.name, hdev->name);
863 di.bdaddr = hdev->bdaddr;
943da25d 864 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
865 di.flags = hdev->flags;
866 di.pkt_type = hdev->pkt_type;
867 di.acl_mtu = hdev->acl_mtu;
868 di.acl_pkts = hdev->acl_pkts;
869 di.sco_mtu = hdev->sco_mtu;
870 di.sco_pkts = hdev->sco_pkts;
871 di.link_policy = hdev->link_policy;
872 di.link_mode = hdev->link_mode;
873
874 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
875 memcpy(&di.features, &hdev->features, sizeof(di.features));
876
877 if (copy_to_user(arg, &di, sizeof(di)))
878 err = -EFAULT;
879
880 hci_dev_put(hdev);
881
882 return err;
883}
884
885/* ---- Interface to HCI drivers ---- */
886
611b30f7
MH
887static int hci_rfkill_set_block(void *data, bool blocked)
888{
889 struct hci_dev *hdev = data;
890
891 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
892
893 if (!blocked)
894 return 0;
895
896 hci_dev_do_close(hdev);
897
898 return 0;
899}
900
901static const struct rfkill_ops hci_rfkill_ops = {
902 .set_block = hci_rfkill_set_block,
903};
904
1da177e4
LT
905/* Alloc HCI device */
906struct hci_dev *hci_alloc_dev(void)
907{
908 struct hci_dev *hdev;
909
25ea6db0 910 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
911 if (!hdev)
912 return NULL;
913
0ac7e700 914 hci_init_sysfs(hdev);
1da177e4
LT
915 skb_queue_head_init(&hdev->driver_init);
916
917 return hdev;
918}
919EXPORT_SYMBOL(hci_alloc_dev);
920
921/* Free HCI device */
922void hci_free_dev(struct hci_dev *hdev)
923{
924 skb_queue_purge(&hdev->driver_init);
925
a91f2e39
MH
926 /* will free via device release */
927 put_device(&hdev->dev);
1da177e4
LT
928}
929EXPORT_SYMBOL(hci_free_dev);
930
ab81cbf9
JH
931static void hci_power_on(struct work_struct *work)
932{
933 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
934
935 BT_DBG("%s", hdev->name);
936
937 if (hci_dev_open(hdev->id) < 0)
938 return;
939
940 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
80b7ab33 941 schedule_delayed_work(&hdev->power_off,
3243553f 942 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
943
944 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 945 mgmt_index_added(hdev);
ab81cbf9
JH
946}
947
948static void hci_power_off(struct work_struct *work)
949{
3243553f
JH
950 struct hci_dev *hdev = container_of(work, struct hci_dev,
951 power_off.work);
ab81cbf9
JH
952
953 BT_DBG("%s", hdev->name);
954
955 clear_bit(HCI_AUTO_OFF, &hdev->flags);
956
3243553f 957 hci_dev_close(hdev->id);
ab81cbf9
JH
958}
959
16ab91ab
JH
960static void hci_discov_off(struct work_struct *work)
961{
962 struct hci_dev *hdev;
963 u8 scan = SCAN_PAGE;
964
965 hdev = container_of(work, struct hci_dev, discov_off.work);
966
967 BT_DBG("%s", hdev->name);
968
09fd0de5 969 hci_dev_lock(hdev);
16ab91ab
JH
970
971 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
972
973 hdev->discov_timeout = 0;
974
09fd0de5 975 hci_dev_unlock(hdev);
16ab91ab
JH
976}
977
2aeb9a1a
JH
978int hci_uuids_clear(struct hci_dev *hdev)
979{
980 struct list_head *p, *n;
981
982 list_for_each_safe(p, n, &hdev->uuids) {
983 struct bt_uuid *uuid;
984
985 uuid = list_entry(p, struct bt_uuid, list);
986
987 list_del(p);
988 kfree(uuid);
989 }
990
991 return 0;
992}
993
55ed8ca1
JH
994int hci_link_keys_clear(struct hci_dev *hdev)
995{
996 struct list_head *p, *n;
997
998 list_for_each_safe(p, n, &hdev->link_keys) {
999 struct link_key *key;
1000
1001 key = list_entry(p, struct link_key, list);
1002
1003 list_del(p);
1004 kfree(key);
1005 }
1006
1007 return 0;
1008}
1009
1010struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1011{
8035ded4 1012 struct link_key *k;
55ed8ca1 1013
8035ded4 1014 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1015 if (bacmp(bdaddr, &k->bdaddr) == 0)
1016 return k;
55ed8ca1
JH
1017
1018 return NULL;
1019}
1020
d25e28ab
JH
1021static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1022 u8 key_type, u8 old_key_type)
1023{
1024 /* Legacy key */
1025 if (key_type < 0x03)
1026 return 1;
1027
1028 /* Debug keys are insecure so don't store them persistently */
1029 if (key_type == HCI_LK_DEBUG_COMBINATION)
1030 return 0;
1031
1032 /* Changed combination key and there's no previous one */
1033 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1034 return 0;
1035
1036 /* Security mode 3 case */
1037 if (!conn)
1038 return 1;
1039
1040 /* Neither local nor remote side had no-bonding as requirement */
1041 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1042 return 1;
1043
1044 /* Local side had dedicated bonding as requirement */
1045 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1046 return 1;
1047
1048 /* Remote side had dedicated bonding as requirement */
1049 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1050 return 1;
1051
1052 /* If none of the above criteria match, then don't store the key
1053 * persistently */
1054 return 0;
1055}
1056
75d262c2
VCG
1057struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1058{
1059 struct link_key *k;
1060
1061 list_for_each_entry(k, &hdev->link_keys, list) {
1062 struct key_master_id *id;
1063
1064 if (k->type != HCI_LK_SMP_LTK)
1065 continue;
1066
1067 if (k->dlen != sizeof(*id))
1068 continue;
1069
1070 id = (void *) &k->data;
1071 if (id->ediv == ediv &&
1072 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1073 return k;
1074 }
1075
1076 return NULL;
1077}
1078EXPORT_SYMBOL(hci_find_ltk);
1079
1080struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1081 bdaddr_t *bdaddr, u8 type)
1082{
1083 struct link_key *k;
1084
1085 list_for_each_entry(k, &hdev->link_keys, list)
1086 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1087 return k;
1088
1089 return NULL;
1090}
1091EXPORT_SYMBOL(hci_find_link_key_type);
1092
d25e28ab
JH
1093int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1094 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1095{
1096 struct link_key *key, *old_key;
4df378a1 1097 u8 old_key_type, persistent;
55ed8ca1
JH
1098
1099 old_key = hci_find_link_key(hdev, bdaddr);
1100 if (old_key) {
1101 old_key_type = old_key->type;
1102 key = old_key;
1103 } else {
12adcf3a 1104 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1105 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1106 if (!key)
1107 return -ENOMEM;
1108 list_add(&key->list, &hdev->link_keys);
1109 }
1110
1111 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1112
d25e28ab
JH
1113 /* Some buggy controller combinations generate a changed
1114 * combination key for legacy pairing even when there's no
1115 * previous key */
1116 if (type == HCI_LK_CHANGED_COMBINATION &&
1117 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1118 old_key_type == 0xff) {
d25e28ab 1119 type = HCI_LK_COMBINATION;
655fe6ec
JH
1120 if (conn)
1121 conn->key_type = type;
1122 }
d25e28ab 1123
55ed8ca1
JH
1124 bacpy(&key->bdaddr, bdaddr);
1125 memcpy(key->val, val, 16);
55ed8ca1
JH
1126 key->pin_len = pin_len;
1127
b6020ba0 1128 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1129 key->type = old_key_type;
4748fed2
JH
1130 else
1131 key->type = type;
1132
4df378a1
JH
1133 if (!new_key)
1134 return 0;
1135
1136 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1137
744cf19e 1138 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1139
1140 if (!persistent) {
1141 list_del(&key->list);
1142 kfree(key);
1143 }
55ed8ca1
JH
1144
1145 return 0;
1146}
1147
75d262c2 1148int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1149 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1150{
1151 struct link_key *key, *old_key;
1152 struct key_master_id *id;
1153 u8 old_key_type;
1154
1155 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1156
1157 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1158 if (old_key) {
1159 key = old_key;
1160 old_key_type = old_key->type;
1161 } else {
1162 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1163 if (!key)
1164 return -ENOMEM;
1165 list_add(&key->list, &hdev->link_keys);
1166 old_key_type = 0xff;
1167 }
1168
1169 key->dlen = sizeof(*id);
1170
1171 bacpy(&key->bdaddr, bdaddr);
1172 memcpy(key->val, ltk, sizeof(key->val));
1173 key->type = HCI_LK_SMP_LTK;
726b4ffc 1174 key->pin_len = key_size;
75d262c2
VCG
1175
1176 id = (void *) &key->data;
1177 id->ediv = ediv;
1178 memcpy(id->rand, rand, sizeof(id->rand));
1179
1180 if (new_key)
744cf19e 1181 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1182
1183 return 0;
1184}
1185
55ed8ca1
JH
1186int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187{
1188 struct link_key *key;
1189
1190 key = hci_find_link_key(hdev, bdaddr);
1191 if (!key)
1192 return -ENOENT;
1193
1194 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1195
1196 list_del(&key->list);
1197 kfree(key);
1198
1199 return 0;
1200}
1201
6bd32326
VT
1202/* HCI command timer function */
1203static void hci_cmd_timer(unsigned long arg)
1204{
1205 struct hci_dev *hdev = (void *) arg;
1206
1207 BT_ERR("%s command tx timeout", hdev->name);
1208 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1209 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1210}
1211
2763eda6
SJ
1212struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1213 bdaddr_t *bdaddr)
1214{
1215 struct oob_data *data;
1216
1217 list_for_each_entry(data, &hdev->remote_oob_data, list)
1218 if (bacmp(bdaddr, &data->bdaddr) == 0)
1219 return data;
1220
1221 return NULL;
1222}
1223
1224int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1225{
1226 struct oob_data *data;
1227
1228 data = hci_find_remote_oob_data(hdev, bdaddr);
1229 if (!data)
1230 return -ENOENT;
1231
1232 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1233
1234 list_del(&data->list);
1235 kfree(data);
1236
1237 return 0;
1238}
1239
1240int hci_remote_oob_data_clear(struct hci_dev *hdev)
1241{
1242 struct oob_data *data, *n;
1243
1244 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1245 list_del(&data->list);
1246 kfree(data);
1247 }
1248
1249 return 0;
1250}
1251
1252int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1253 u8 *randomizer)
1254{
1255 struct oob_data *data;
1256
1257 data = hci_find_remote_oob_data(hdev, bdaddr);
1258
1259 if (!data) {
1260 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1261 if (!data)
1262 return -ENOMEM;
1263
1264 bacpy(&data->bdaddr, bdaddr);
1265 list_add(&data->list, &hdev->remote_oob_data);
1266 }
1267
1268 memcpy(data->hash, hash, sizeof(data->hash));
1269 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1270
1271 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1272
1273 return 0;
1274}
1275
b2a66aad
AJ
1276struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1277 bdaddr_t *bdaddr)
1278{
8035ded4 1279 struct bdaddr_list *b;
b2a66aad 1280
8035ded4 1281 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1282 if (bacmp(bdaddr, &b->bdaddr) == 0)
1283 return b;
b2a66aad
AJ
1284
1285 return NULL;
1286}
1287
1288int hci_blacklist_clear(struct hci_dev *hdev)
1289{
1290 struct list_head *p, *n;
1291
1292 list_for_each_safe(p, n, &hdev->blacklist) {
1293 struct bdaddr_list *b;
1294
1295 b = list_entry(p, struct bdaddr_list, list);
1296
1297 list_del(p);
1298 kfree(b);
1299 }
1300
1301 return 0;
1302}
1303
1304int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1305{
1306 struct bdaddr_list *entry;
b2a66aad
AJ
1307
1308 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1309 return -EBADF;
1310
5e762444
AJ
1311 if (hci_blacklist_lookup(hdev, bdaddr))
1312 return -EEXIST;
b2a66aad
AJ
1313
1314 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1315 if (!entry)
1316 return -ENOMEM;
b2a66aad
AJ
1317
1318 bacpy(&entry->bdaddr, bdaddr);
1319
1320 list_add(&entry->list, &hdev->blacklist);
1321
744cf19e 1322 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1323}
1324
1325int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1326{
1327 struct bdaddr_list *entry;
b2a66aad 1328
1ec918ce 1329 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1330 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1331
1332 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1333 if (!entry)
5e762444 1334 return -ENOENT;
b2a66aad
AJ
1335
1336 list_del(&entry->list);
1337 kfree(entry);
1338
744cf19e 1339 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1340}
1341
db323f2f 1342static void hci_clear_adv_cache(struct work_struct *work)
35815085 1343{
db323f2f
GP
1344 struct hci_dev *hdev = container_of(work, struct hci_dev,
1345 adv_work.work);
35815085
AG
1346
1347 hci_dev_lock(hdev);
1348
1349 hci_adv_entries_clear(hdev);
1350
1351 hci_dev_unlock(hdev);
1352}
1353
76c8686f
AG
1354int hci_adv_entries_clear(struct hci_dev *hdev)
1355{
1356 struct adv_entry *entry, *tmp;
1357
1358 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1359 list_del(&entry->list);
1360 kfree(entry);
1361 }
1362
1363 BT_DBG("%s adv cache cleared", hdev->name);
1364
1365 return 0;
1366}
1367
1368struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369{
1370 struct adv_entry *entry;
1371
1372 list_for_each_entry(entry, &hdev->adv_entries, list)
1373 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1374 return entry;
1375
1376 return NULL;
1377}
1378
1379static inline int is_connectable_adv(u8 evt_type)
1380{
1381 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1382 return 1;
1383
1384 return 0;
1385}
1386
1387int hci_add_adv_entry(struct hci_dev *hdev,
1388 struct hci_ev_le_advertising_info *ev)
1389{
1390 struct adv_entry *entry;
1391
1392 if (!is_connectable_adv(ev->evt_type))
1393 return -EINVAL;
1394
1395 /* Only new entries should be added to adv_entries. So, if
1396 * bdaddr was found, don't add it. */
1397 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1398 return 0;
1399
1400 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1401 if (!entry)
1402 return -ENOMEM;
1403
1404 bacpy(&entry->bdaddr, &ev->bdaddr);
1405 entry->bdaddr_type = ev->bdaddr_type;
1406
1407 list_add(&entry->list, &hdev->adv_entries);
1408
1409 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1410 batostr(&entry->bdaddr), entry->bdaddr_type);
1411
1412 return 0;
1413}
1414
1da177e4
LT
/* Register HCI device */
/* Allocate an index, initialise all per-device state, create the
 * workqueue and sysfs/rfkill entries, and schedule the initial power-on.
 * Returns the assigned device id (>= 0) or a negative errno; on error
 * the device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A transport driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	/* Default packet types, link policy and IO capability */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/TX/command processing runs from the per-device workqueue */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for unanswered HCI commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves rfkill NULL */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1542
/* Unregister HCI device */
/* Tear down a device in the reverse order of registration: unlink from
 * the global list, close the transport, free reassembly buffers, notify
 * management (unless still in INIT/SETUP), drop rfkill/sysfs, cancel
 * pending work, destroy the workqueue, purge all per-device caches and
 * finally drop the reference taken at registration.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv cache work cannot run after teardown */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1594
1595/* Suspend HCI device */
1596int hci_suspend_dev(struct hci_dev *hdev)
1597{
1598 hci_notify(hdev, HCI_DEV_SUSPEND);
1599 return 0;
1600}
1601EXPORT_SYMBOL(hci_suspend_dev);
1602
1603/* Resume HCI device */
1604int hci_resume_dev(struct hci_dev *hdev)
1605{
1606 hci_notify(hdev, HCI_DEV_RESUME);
1607 return 0;
1608}
1609EXPORT_SYMBOL(hci_resume_dev);
1610
76bca880
MH
1611/* Receive frame from HCI drivers */
1612int hci_recv_frame(struct sk_buff *skb)
1613{
1614 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1615 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1616 && !test_bit(HCI_INIT, &hdev->flags))) {
1617 kfree_skb(skb);
1618 return -ENXIO;
1619 }
1620
1621 /* Incomming skb */
1622 bt_cb(skb)->incoming = 1;
1623
1624 /* Time stamp */
1625 __net_timestamp(skb);
1626
76bca880 1627 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1628 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1629
76bca880
MH
1630 return 0;
1631}
1632EXPORT_SYMBOL(hci_recv_frame);
1633
/* Core packet reassembly engine shared by hci_recv_fragment() and
 * hci_recv_stream_fragment().
 *
 * Appends up to @count bytes of @data to the partially built packet in
 * hdev->reassembly[@index], allocating a fresh skb sized for the given
 * packet @type when none is in progress.  Once a packet's header is
 * complete, the expected payload length is read from it; a complete
 * packet is handed to hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno: -EILSEQ for an invalid type/index, -ENOMEM on allocation
 * failure or when the advertised payload exceeds the skb's tailroom.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: size the buffer for the worst
		 * case of this packet type. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect counts bytes still needed before the next
		 * decision point; the header comes first. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header is in, read the payload length
		 * from it and extend the expectation accordingly. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1742
ef222013
MH
1743int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1744{
f39a3c06
SS
1745 int rem = 0;
1746
ef222013
MH
1747 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1748 return -EILSEQ;
1749
da5f6c37 1750 while (count) {
1e429f38 1751 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1752 if (rem < 0)
1753 return rem;
ef222013 1754
f39a3c06
SS
1755 data += (count - rem);
1756 count = rem;
f81c6224 1757 }
ef222013 1758
f39a3c06 1759 return rem;
ef222013
MH
1760}
1761EXPORT_SYMBOL(hci_recv_fragment);
1762
99811510
SS
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from an untyped byte stream (e.g. a UART
 * transport): the first byte of every frame carries the packet type
 * indicator, after which the payload is fed to hci_reassembly() using
 * the dedicated stream slot.  Returns leftover byte count or a negative
 * errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continue the packet already in progress */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1797
1da177e4
LT
1798/* ---- Interface to upper protocols ---- */
1799
1800/* Register/Unregister protocols.
1801 * hci_task_lock is used to ensure that no tasks are running. */
1802int hci_register_proto(struct hci_proto *hp)
1803{
1804 int err = 0;
1805
1806 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808 if (hp->id >= HCI_MAX_PROTO)
1809 return -EINVAL;
1810
67d0dfb5 1811 mutex_lock(&hci_task_lock);
1da177e4
LT
1812
1813 if (!hci_proto[hp->id])
1814 hci_proto[hp->id] = hp;
1815 else
1816 err = -EEXIST;
1817
67d0dfb5 1818 mutex_unlock(&hci_task_lock);
1da177e4
LT
1819
1820 return err;
1821}
1822EXPORT_SYMBOL(hci_register_proto);
1823
1824int hci_unregister_proto(struct hci_proto *hp)
1825{
1826 int err = 0;
1827
1828 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829
1830 if (hp->id >= HCI_MAX_PROTO)
1831 return -EINVAL;
1832
67d0dfb5 1833 mutex_lock(&hci_task_lock);
1da177e4
LT
1834
1835 if (hci_proto[hp->id])
1836 hci_proto[hp->id] = NULL;
1837 else
1838 err = -ENOENT;
1839
67d0dfb5 1840 mutex_unlock(&hci_task_lock);
1da177e4
LT
1841
1842 return err;
1843}
1844EXPORT_SYMBOL(hci_unregister_proto);
1845
1846int hci_register_cb(struct hci_cb *cb)
1847{
1848 BT_DBG("%p name %s", cb, cb->name);
1849
1850 write_lock_bh(&hci_cb_list_lock);
1851 list_add(&cb->list, &hci_cb_list);
1852 write_unlock_bh(&hci_cb_list_lock);
1853
1854 return 0;
1855}
1856EXPORT_SYMBOL(hci_register_cb);
1857
1858int hci_unregister_cb(struct hci_cb *cb)
1859{
1860 BT_DBG("%p name %s", cb, cb->name);
1861
1862 write_lock_bh(&hci_cb_list_lock);
1863 list_del(&cb->list);
1864 write_unlock_bh(&hci_cb_list_lock);
1865
1866 return 0;
1867}
1868EXPORT_SYMBOL(hci_unregister_cb);
1869
1870static int hci_send_frame(struct sk_buff *skb)
1871{
1872 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1873
1874 if (!hdev) {
1875 kfree_skb(skb);
1876 return -ENODEV;
1877 }
1878
0d48d939 1879 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1880
1881 if (atomic_read(&hdev->promisc)) {
1882 /* Time stamp */
a61bbcf2 1883 __net_timestamp(skb);
1da177e4 1884
eec8d2bc 1885 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1886 }
1887
1888 /* Get rid of skb owner, prior to sending to the driver. */
1889 skb_orphan(skb);
1890
1891 return hdev->send(skb);
1892}
1893
1894/* Send HCI command */
a9de9248 1895int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1896{
1897 int len = HCI_COMMAND_HDR_SIZE + plen;
1898 struct hci_command_hdr *hdr;
1899 struct sk_buff *skb;
1900
a9de9248 1901 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1902
1903 skb = bt_skb_alloc(len, GFP_ATOMIC);
1904 if (!skb) {
ef222013 1905 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1906 return -ENOMEM;
1907 }
1908
1909 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1910 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1911 hdr->plen = plen;
1912
1913 if (plen)
1914 memcpy(skb_put(skb, plen), param, plen);
1915
1916 BT_DBG("skb len %d", skb->len);
1917
0d48d939 1918 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1919 skb->dev = (void *) hdev;
c78ae283 1920
a5040efa
JH
1921 if (test_bit(HCI_INIT, &hdev->flags))
1922 hdev->init_last_cmd = opcode;
1923
1da177e4 1924 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1925 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1926
1927 return 0;
1928}
1da177e4
LT
1929
1930/* Get data from the previously sent command */
a9de9248 1931void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1932{
1933 struct hci_command_hdr *hdr;
1934
1935 if (!hdev->sent_cmd)
1936 return NULL;
1937
1938 hdr = (void *) hdev->sent_cmd->data;
1939
a9de9248 1940 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1941 return NULL;
1942
a9de9248 1943 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1944
1945 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946}
1947
1948/* Send ACL data */
1949static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1950{
1951 struct hci_acl_hdr *hdr;
1952 int len = skb->len;
1953
badff6d0
ACM
1954 skb_push(skb, HCI_ACL_HDR_SIZE);
1955 skb_reset_transport_header(skb);
9c70220b 1956 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1957 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1958 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1959}
1960
73d80deb
LAD
/* Queue an outgoing ACL skb on @queue.  The head skb already carries
 * its ACL header; any continuation fragments hanging off its frag_list
 * get ACL_CONT headers here.  Fragments are queued under the queue lock
 * so the TX scheduler never observes a partially queued packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2001
2002void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2003{
2004 struct hci_conn *conn = chan->conn;
2005 struct hci_dev *hdev = conn->hdev;
2006
2007 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2008
2009 skb->dev = (void *) hdev;
2010 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2011 hci_add_acl_hdr(skb, conn->handle, flags);
2012
2013 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2014
3eff45ea 2015 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2016}
2017EXPORT_SYMBOL(hci_send_acl);
2018
2019/* Send SCO data */
0d861d8b 2020void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2021{
2022 struct hci_dev *hdev = conn->hdev;
2023 struct hci_sco_hdr hdr;
2024
2025 BT_DBG("%s len %d", hdev->name, skb->len);
2026
aca3192c 2027 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2028 hdr.dlen = skb->len;
2029
badff6d0
ACM
2030 skb_push(skb, HCI_SCO_HDR_SIZE);
2031 skb_reset_transport_header(skb);
9c70220b 2032 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2033
2034 skb->dev = (void *) hdev;
0d48d939 2035 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2036
1da177e4 2037 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2038 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2039}
2040EXPORT_SYMBOL(hci_send_sco);
2041
2042/* ---- HCI TX task (outgoing data) ---- */
2043
/* HCI Connection scheduler */
/* Select the active connection of link @type with the fewest packets in
 * flight and compute its fair-share packet budget in *quote from the
 * matching controller buffer count.  Returns NULL (and *quote = 0) when
 * no eligible connection has queued data.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Favour the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2103
bae1f5d9 2104static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2105{
2106 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2107 struct hci_conn *c;
1da177e4 2108
bae1f5d9 2109 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2110
bf4c6325
GP
2111 rcu_read_lock();
2112
1da177e4 2113 /* Kill stalled connections */
bf4c6325 2114 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2115 if (c->type == type && c->sent) {
2116 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2117 hdev->name, batostr(&c->dst));
2118 hci_acl_disconn(c, 0x13);
2119 }
2120 }
bf4c6325
GP
2121
2122 rcu_read_unlock();
1da177e4
LT
2123}
2124
73d80deb
LAD
/* Select the HCI channel to service next for link @type.  Only channels
 * whose head skb holds the highest pending priority compete; among
 * those, the channel on the least-busy connection wins.  *quote gets
 * the fair share of controller buffers to send from it.  Returns NULL
 * when nothing of this type is queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Channels below the current best priority lose */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the election */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer pool depends on the winner's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2203
02b20f0b
LAD
/* Anti-starvation pass run after a scheduling round: the head skb of
 * every channel of @type that did not get to send (chan->sent == 0) is
 * promoted to HCI_PRIO_MAX - 1 so low-priority traffic eventually wins
 * the priority election in hci_chan_sent().  Channels that did send
 * merely have their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset and move on */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2253
73d80deb
LAD
/* Drain queued ACL data to the controller channel by channel, within
 * each channel's fair quote from hci_chan_sent(), stopping a channel
 * early when a lower-priority skb surfaces.  Also fires the stalled
 * link watchdog when no ACL buffer has come back for 45 seconds, and
 * runs the anti-starvation pass if anything was sent.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting budget to detect whether we sent anything */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a priority boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2303
2304/* Schedule SCO */
2305static inline void hci_sched_sco(struct hci_dev *hdev)
2306{
2307 struct hci_conn *conn;
2308 struct sk_buff *skb;
2309 int quote;
2310
2311 BT_DBG("%s", hdev->name);
2312
52087a79
LAD
2313 if (!hci_conn_num(hdev, SCO_LINK))
2314 return;
2315
1da177e4
LT
2316 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2317 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2318 BT_DBG("skb %p len %d", skb, skb->len);
2319 hci_send_frame(skb);
2320
2321 conn->sent++;
2322 if (conn->sent == ~0)
2323 conn->sent = 0;
2324 }
2325 }
2326}
2327
b6a0dc82
MH
2328static inline void hci_sched_esco(struct hci_dev *hdev)
2329{
2330 struct hci_conn *conn;
2331 struct sk_buff *skb;
2332 int quote;
2333
2334 BT_DBG("%s", hdev->name);
2335
52087a79
LAD
2336 if (!hci_conn_num(hdev, ESCO_LINK))
2337 return;
2338
b6a0dc82
MH
2339 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2340 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2341 BT_DBG("skb %p len %d", skb, skb->len);
2342 hci_send_frame(skb);
2343
2344 conn->sent++;
2345 if (conn->sent == ~0)
2346 conn->sent = 0;
2347 }
2348 }
2349}
2350
6ed58ec5
VT
/* Drain queued LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL pool.  Mirrors hci_sched_acl(): per
 * channel quotes, priority-change early exit, stalled link watchdog and
 * a final anti-starvation pass.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Draw from the LE pool if present, otherwise share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2401
3eff45ea 2402static void hci_tx_work(struct work_struct *work)
1da177e4 2403{
3eff45ea 2404 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2405 struct sk_buff *skb;
2406
67d0dfb5 2407 mutex_lock(&hci_task_lock);
1da177e4 2408
6ed58ec5
VT
2409 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2410 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2411
2412 /* Schedule queues and send stuff to HCI driver */
2413
2414 hci_sched_acl(hdev);
2415
2416 hci_sched_sco(hdev);
2417
b6a0dc82
MH
2418 hci_sched_esco(hdev);
2419
6ed58ec5
VT
2420 hci_sched_le(hdev);
2421
1da177e4
LT
2422 /* Send next queued raw (unknown type) packet */
2423 while ((skb = skb_dequeue(&hdev->raw_q)))
2424 hci_send_frame(skb);
2425
67d0dfb5 2426 mutex_unlock(&hci_task_lock);
1da177e4
LT
2427}
2428
25985edc 2429/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2430
2431/* ACL data packet */
2432static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2433{
2434 struct hci_acl_hdr *hdr = (void *) skb->data;
2435 struct hci_conn *conn;
2436 __u16 handle, flags;
2437
2438 skb_pull(skb, HCI_ACL_HDR_SIZE);
2439
2440 handle = __le16_to_cpu(hdr->handle);
2441 flags = hci_flags(handle);
2442 handle = hci_handle(handle);
2443
2444 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2445
2446 hdev->stat.acl_rx++;
2447
2448 hci_dev_lock(hdev);
2449 conn = hci_conn_hash_lookup_handle(hdev, handle);
2450 hci_dev_unlock(hdev);
8e87d142 2451
1da177e4
LT
2452 if (conn) {
2453 register struct hci_proto *hp;
2454
14b12d0b 2455 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
04837f64 2456
1da177e4 2457 /* Send to upper protocol */
70f23020
AE
2458 hp = hci_proto[HCI_PROTO_L2CAP];
2459 if (hp && hp->recv_acldata) {
1da177e4
LT
2460 hp->recv_acldata(conn, skb, flags);
2461 return;
2462 }
2463 } else {
8e87d142 2464 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2465 hdev->name, handle);
2466 }
2467
2468 kfree_skb(skb);
2469}
2470
2471/* SCO data packet */
2472static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2473{
2474 struct hci_sco_hdr *hdr = (void *) skb->data;
2475 struct hci_conn *conn;
2476 __u16 handle;
2477
2478 skb_pull(skb, HCI_SCO_HDR_SIZE);
2479
2480 handle = __le16_to_cpu(hdr->handle);
2481
2482 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2483
2484 hdev->stat.sco_rx++;
2485
2486 hci_dev_lock(hdev);
2487 conn = hci_conn_hash_lookup_handle(hdev, handle);
2488 hci_dev_unlock(hdev);
2489
2490 if (conn) {
2491 register struct hci_proto *hp;
2492
2493 /* Send to upper protocol */
70f23020
AE
2494 hp = hci_proto[HCI_PROTO_SCO];
2495 if (hp && hp->recv_scodata) {
1da177e4
LT
2496 hp->recv_scodata(conn, skb);
2497 return;
2498 }
2499 } else {
8e87d142 2500 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2501 hdev->name, handle);
2502 }
2503
2504 kfree_skb(skb);
2505}
2506
/* Workqueue handler for incoming HCI traffic: drains hdev->rx_q and
 * dispatches each packet by type to the event/ACL/SCO handlers. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	mutex_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* In promiscuous mode, mirror every packet to the
		 * monitoring sockets before normal processing. */
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: the stack does not process packets at all */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events still fall through to the switch below. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}

	mutex_unlock(&hci_task_lock);
}
2562
/* Workqueue handler that sends the next queued HCI command to the driver,
 * gated by the controller's outstanding-command window (cmd_cnt). */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd; kfree_skb(NULL) is a no-op */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the command being sent */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While resetting, don't arm the command timeout;
			 * otherwise (re)start it for this command. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry
			 * from the workqueue later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2593
2594int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2595{
2596 /* General inquiry access code (GIAC) */
2597 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2598 struct hci_cp_inquiry cp;
2599
2600 BT_DBG("%s", hdev->name);
2601
2602 if (test_bit(HCI_INQUIRY, &hdev->flags))
2603 return -EINPROGRESS;
2604
2605 memset(&cp, 0, sizeof(cp));
2606 memcpy(&cp.lap, lap, sizeof(cp.lap));
2607 cp.length = length;
2608
2609 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2610}
023d5049
AG
2611
2612int hci_cancel_inquiry(struct hci_dev *hdev)
2613{
2614 BT_DBG("%s", hdev->name);
2615
2616 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2617 return -EPERM;
2618
2619 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2620}
7784d78f
AE
2621
/* Expose enable_hs as a writable (0644) module parameter toggling
 * Bluetooth High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");