mac80211: Fixing sparse warning at sta_info.c
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
/* Delay (ms) before an auto-powered-on controller is switched off again */
#define AUTO_OFF_TIMEOUT	2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

static DEFINE_MUTEX(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
80
81/* ---- HCI notifications ---- */
82
83int hci_register_notifier(struct notifier_block *nb)
84{
e041c683 85 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
86}
87
88int hci_unregister_notifier(struct notifier_block *nb)
89{
e041c683 90 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
91}
92
6516455d 93static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 94{
e041c683 95 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
96}
97
98/* ---- HCI requests ---- */
99
23bb5763 100void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 101{
23bb5763
JH
102 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
a5040efa
JH
104 /* If this is the init phase check if the completed command matches
105 * the last init command, and if not just return.
106 */
107 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 108 return;
1da177e4
LT
109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = result;
112 hdev->req_status = HCI_REQ_DONE;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117static void hci_req_cancel(struct hci_dev *hdev, int err)
118{
119 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
125 }
126}
127
128/* Execute request and wait for completion. */
8e87d142 129static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 130 unsigned long opt, __u32 timeout)
1da177e4
LT
131{
132 DECLARE_WAITQUEUE(wait, current);
133 int err = 0;
134
135 BT_DBG("%s start", hdev->name);
136
137 hdev->req_status = HCI_REQ_PEND;
138
139 add_wait_queue(&hdev->req_wait_q, &wait);
140 set_current_state(TASK_INTERRUPTIBLE);
141
142 req(hdev, opt);
143 schedule_timeout(timeout);
144
145 remove_wait_queue(&hdev->req_wait_q, &wait);
146
147 if (signal_pending(current))
148 return -EINTR;
149
150 switch (hdev->req_status) {
151 case HCI_REQ_DONE:
e175072f 152 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
153 break;
154
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
157 break;
158
159 default:
160 err = -ETIMEDOUT;
161 break;
3ff50b79 162 }
1da177e4 163
a5040efa 164 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
165
166 BT_DBG("%s end: err %d", hdev->name, err);
167
168 return err;
169}
170
171static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 172 unsigned long opt, __u32 timeout)
1da177e4
LT
173{
174 int ret;
175
7c6a329e
MH
176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
1da177e4
LT
179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
187static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188{
189 BT_DBG("%s %ld", hdev->name, opt);
190
191 /* Reset device */
f630cf0d 192 set_bit(HCI_RESET, &hdev->flags);
a9de9248 193 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
194}
195
196static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
197{
b0916ea0 198 struct hci_cp_delete_stored_link_key cp;
1da177e4 199 struct sk_buff *skb;
1ebb9252 200 __le16 param;
89f2783d 201 __u8 flt_type;
1da177e4
LT
202
203 BT_DBG("%s %ld", hdev->name, opt);
204
205 /* Driver initialization */
206
207 /* Special commands */
208 while ((skb = skb_dequeue(&hdev->driver_init))) {
0d48d939 209 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 210 skb->dev = (void *) hdev;
c78ae283 211
1da177e4 212 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 213 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
214 }
215 skb_queue_purge(&hdev->driver_init);
216
217 /* Mandatory initialization */
218
219 /* Reset */
f630cf0d
GP
220 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
221 set_bit(HCI_RESET, &hdev->flags);
a9de9248 222 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 223 }
1da177e4
LT
224
225 /* Read Local Supported Features */
a9de9248 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 227
1143e5a6 228 /* Read Local Version */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 230
1da177e4 231 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 232 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 233
1da177e4 234 /* Read BD Address */
a9de9248
MH
235 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
236
237 /* Read Class of Device */
238 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
239
240 /* Read Local Name */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
242
243 /* Read Voice Setting */
a9de9248 244 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
245
246 /* Optional initialization */
247
248 /* Clear Event Filters */
89f2783d 249 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 250 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 251
1da177e4 252 /* Connection accept timeout ~20 secs */
aca3192c 253 param = cpu_to_le16(0x7d00);
a9de9248 254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
255
256 bacpy(&cp.bdaddr, BDADDR_ANY);
257 cp.delete_all = 1;
258 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
259}
260
6ed58ec5
VT
261static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
262{
263 BT_DBG("%s", hdev->name);
264
265 /* Read LE buffer size */
266 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
267}
268
1da177e4
LT
269static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
270{
271 __u8 scan = opt;
272
273 BT_DBG("%s %x", hdev->name, scan);
274
275 /* Inquiry and Page scans */
a9de9248 276 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
277}
278
279static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 auth = opt;
282
283 BT_DBG("%s %x", hdev->name, auth);
284
285 /* Authentication */
a9de9248 286 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
287}
288
289static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 encrypt = opt;
292
293 BT_DBG("%s %x", hdev->name, encrypt);
294
e4e8e37c 295 /* Encryption */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
297}
298
e4e8e37c
MH
299static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __le16 policy = cpu_to_le16(opt);
302
a418b893 303 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
304
305 /* Default link policy */
306 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
307}
308
8e87d142 309/* Get HCI device by index.
1da177e4
LT
310 * Device is held on return. */
311struct hci_dev *hci_dev_get(int index)
312{
8035ded4 313 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
314
315 BT_DBG("%d", index);
316
317 if (index < 0)
318 return NULL;
319
320 read_lock(&hci_dev_list_lock);
8035ded4 321 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
322 if (d->id == index) {
323 hdev = hci_dev_hold(d);
324 break;
325 }
326 }
327 read_unlock(&hci_dev_list_lock);
328 return hdev;
329}
1da177e4
LT
330
331/* ---- Inquiry support ---- */
332static void inquiry_cache_flush(struct hci_dev *hdev)
333{
334 struct inquiry_cache *cache = &hdev->inq_cache;
335 struct inquiry_entry *next = cache->list, *e;
336
337 BT_DBG("cache %p", cache);
338
339 cache->list = NULL;
340 while ((e = next)) {
341 next = e->next;
342 kfree(e);
343 }
344}
345
346struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
347{
348 struct inquiry_cache *cache = &hdev->inq_cache;
349 struct inquiry_entry *e;
350
351 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
352
353 for (e = cache->list; e; e = e->next)
354 if (!bacmp(&e->data.bdaddr, bdaddr))
355 break;
356 return e;
357}
358
359void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
360{
361 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 362 struct inquiry_entry *ie;
1da177e4
LT
363
364 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
365
70f23020
AE
366 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
367 if (!ie) {
1da177e4 368 /* Entry not in the cache. Add new one. */
70f23020
AE
369 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
370 if (!ie)
1da177e4 371 return;
70f23020
AE
372
373 ie->next = cache->list;
374 cache->list = ie;
1da177e4
LT
375 }
376
70f23020
AE
377 memcpy(&ie->data, data, sizeof(*data));
378 ie->timestamp = jiffies;
1da177e4
LT
379 cache->timestamp = jiffies;
380}
381
382static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
383{
384 struct inquiry_cache *cache = &hdev->inq_cache;
385 struct inquiry_info *info = (struct inquiry_info *) buf;
386 struct inquiry_entry *e;
387 int copied = 0;
388
389 for (e = cache->list; e && copied < num; e = e->next, copied++) {
390 struct inquiry_data *data = &e->data;
391 bacpy(&info->bdaddr, &data->bdaddr);
392 info->pscan_rep_mode = data->pscan_rep_mode;
393 info->pscan_period_mode = data->pscan_period_mode;
394 info->pscan_mode = data->pscan_mode;
395 memcpy(info->dev_class, data->dev_class, 3);
396 info->clock_offset = data->clock_offset;
397 info++;
398 }
399
400 BT_DBG("cache %p, copied %d", cache, copied);
401 return copied;
402}
403
404static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
405{
406 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
407 struct hci_cp_inquiry cp;
408
409 BT_DBG("%s", hdev->name);
410
411 if (test_bit(HCI_INQUIRY, &hdev->flags))
412 return;
413
414 /* Start Inquiry */
415 memcpy(&cp.lap, &ir->lap, 3);
416 cp.length = ir->length;
417 cp.num_rsp = ir->num_rsp;
a9de9248 418 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
419}
420
421int hci_inquiry(void __user *arg)
422{
423 __u8 __user *ptr = arg;
424 struct hci_inquiry_req ir;
425 struct hci_dev *hdev;
426 int err = 0, do_inquiry = 0, max_rsp;
427 long timeo;
428 __u8 *buf;
429
430 if (copy_from_user(&ir, ptr, sizeof(ir)))
431 return -EFAULT;
432
5a08ecce
AE
433 hdev = hci_dev_get(ir.dev_id);
434 if (!hdev)
1da177e4
LT
435 return -ENODEV;
436
09fd0de5 437 hci_dev_lock(hdev);
8e87d142 438 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
439 inquiry_cache_empty(hdev) ||
440 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
441 inquiry_cache_flush(hdev);
442 do_inquiry = 1;
443 }
09fd0de5 444 hci_dev_unlock(hdev);
1da177e4 445
04837f64 446 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
447
448 if (do_inquiry) {
449 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
450 if (err < 0)
451 goto done;
452 }
1da177e4
LT
453
454 /* for unlimited number of responses we will use buffer with 255 entries */
455 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
456
457 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
458 * copy it to the user space.
459 */
01df8c31 460 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 461 if (!buf) {
1da177e4
LT
462 err = -ENOMEM;
463 goto done;
464 }
465
09fd0de5 466 hci_dev_lock(hdev);
1da177e4 467 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 468 hci_dev_unlock(hdev);
1da177e4
LT
469
470 BT_DBG("num_rsp %d", ir.num_rsp);
471
472 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
473 ptr += sizeof(ir);
474 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
475 ir.num_rsp))
476 err = -EFAULT;
8e87d142 477 } else
1da177e4
LT
478 err = -EFAULT;
479
480 kfree(buf);
481
482done:
483 hci_dev_put(hdev);
484 return err;
485}
486
487/* ---- HCI ioctl helpers ---- */
488
489int hci_dev_open(__u16 dev)
490{
491 struct hci_dev *hdev;
492 int ret = 0;
493
5a08ecce
AE
494 hdev = hci_dev_get(dev);
495 if (!hdev)
1da177e4
LT
496 return -ENODEV;
497
498 BT_DBG("%s %p", hdev->name, hdev);
499
500 hci_req_lock(hdev);
501
611b30f7
MH
502 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
503 ret = -ERFKILL;
504 goto done;
505 }
506
1da177e4
LT
507 if (test_bit(HCI_UP, &hdev->flags)) {
508 ret = -EALREADY;
509 goto done;
510 }
511
512 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
513 set_bit(HCI_RAW, &hdev->flags);
514
07e3b94a
AE
515 /* Treat all non BR/EDR controllers as raw devices if
516 enable_hs is not set */
517 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
518 set_bit(HCI_RAW, &hdev->flags);
519
1da177e4
LT
520 if (hdev->open(hdev)) {
521 ret = -EIO;
522 goto done;
523 }
524
525 if (!test_bit(HCI_RAW, &hdev->flags)) {
526 atomic_set(&hdev->cmd_cnt, 1);
527 set_bit(HCI_INIT, &hdev->flags);
a5040efa 528 hdev->init_last_cmd = 0;
1da177e4 529
04837f64
MH
530 ret = __hci_request(hdev, hci_init_req, 0,
531 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 532
eead27da 533 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
534 ret = __hci_request(hdev, hci_le_init_req, 0,
535 msecs_to_jiffies(HCI_INIT_TIMEOUT));
536
1da177e4
LT
537 clear_bit(HCI_INIT, &hdev->flags);
538 }
539
540 if (!ret) {
541 hci_dev_hold(hdev);
542 set_bit(HCI_UP, &hdev->flags);
543 hci_notify(hdev, HCI_DEV_UP);
56e5cb86 544 if (!test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 545 hci_dev_lock(hdev);
744cf19e 546 mgmt_powered(hdev, 1);
09fd0de5 547 hci_dev_unlock(hdev);
56e5cb86 548 }
8e87d142 549 } else {
1da177e4 550 /* Init failed, cleanup */
3eff45ea 551 flush_work(&hdev->tx_work);
c347b765 552 flush_work(&hdev->cmd_work);
b78752cc 553 flush_work(&hdev->rx_work);
1da177e4
LT
554
555 skb_queue_purge(&hdev->cmd_q);
556 skb_queue_purge(&hdev->rx_q);
557
558 if (hdev->flush)
559 hdev->flush(hdev);
560
561 if (hdev->sent_cmd) {
562 kfree_skb(hdev->sent_cmd);
563 hdev->sent_cmd = NULL;
564 }
565
566 hdev->close(hdev);
567 hdev->flags = 0;
568 }
569
570done:
571 hci_req_unlock(hdev);
572 hci_dev_put(hdev);
573 return ret;
574}
575
576static int hci_dev_do_close(struct hci_dev *hdev)
577{
578 BT_DBG("%s %p", hdev->name, hdev);
579
580 hci_req_cancel(hdev, ENODEV);
581 hci_req_lock(hdev);
582
583 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 584 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
585 hci_req_unlock(hdev);
586 return 0;
587 }
588
3eff45ea
GP
589 /* Flush RX and TX works */
590 flush_work(&hdev->tx_work);
b78752cc 591 flush_work(&hdev->rx_work);
1da177e4 592
16ab91ab 593 if (hdev->discov_timeout > 0) {
e0f9309f 594 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
595 hdev->discov_timeout = 0;
596 }
597
3243553f 598 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 599 cancel_delayed_work(&hdev->power_off);
3243553f 600
7d78525d
JH
601 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
602 cancel_delayed_work(&hdev->service_cache);
603
09fd0de5 604 hci_dev_lock(hdev);
1da177e4
LT
605 inquiry_cache_flush(hdev);
606 hci_conn_hash_flush(hdev);
09fd0de5 607 hci_dev_unlock(hdev);
1da177e4
LT
608
609 hci_notify(hdev, HCI_DEV_DOWN);
610
611 if (hdev->flush)
612 hdev->flush(hdev);
613
614 /* Reset device */
615 skb_queue_purge(&hdev->cmd_q);
616 atomic_set(&hdev->cmd_cnt, 1);
617 if (!test_bit(HCI_RAW, &hdev->flags)) {
618 set_bit(HCI_INIT, &hdev->flags);
04837f64 619 __hci_request(hdev, hci_reset_req, 0,
43611a7b 620 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
621 clear_bit(HCI_INIT, &hdev->flags);
622 }
623
c347b765
GP
624 /* flush cmd work */
625 flush_work(&hdev->cmd_work);
1da177e4
LT
626
627 /* Drop queues */
628 skb_queue_purge(&hdev->rx_q);
629 skb_queue_purge(&hdev->cmd_q);
630 skb_queue_purge(&hdev->raw_q);
631
632 /* Drop last sent command */
633 if (hdev->sent_cmd) {
b79f44c1 634 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
635 kfree_skb(hdev->sent_cmd);
636 hdev->sent_cmd = NULL;
637 }
638
639 /* After this point our queues are empty
640 * and no tasks are scheduled. */
641 hdev->close(hdev);
642
09fd0de5 643 hci_dev_lock(hdev);
744cf19e 644 mgmt_powered(hdev, 0);
09fd0de5 645 hci_dev_unlock(hdev);
5add6af8 646
1da177e4
LT
647 /* Clear flags */
648 hdev->flags = 0;
649
650 hci_req_unlock(hdev);
651
652 hci_dev_put(hdev);
653 return 0;
654}
655
656int hci_dev_close(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int err;
660
70f23020
AE
661 hdev = hci_dev_get(dev);
662 if (!hdev)
1da177e4
LT
663 return -ENODEV;
664 err = hci_dev_do_close(hdev);
665 hci_dev_put(hdev);
666 return err;
667}
668
669int hci_dev_reset(__u16 dev)
670{
671 struct hci_dev *hdev;
672 int ret = 0;
673
70f23020
AE
674 hdev = hci_dev_get(dev);
675 if (!hdev)
1da177e4
LT
676 return -ENODEV;
677
678 hci_req_lock(hdev);
1da177e4
LT
679
680 if (!test_bit(HCI_UP, &hdev->flags))
681 goto done;
682
683 /* Drop queues */
684 skb_queue_purge(&hdev->rx_q);
685 skb_queue_purge(&hdev->cmd_q);
686
09fd0de5 687 hci_dev_lock(hdev);
1da177e4
LT
688 inquiry_cache_flush(hdev);
689 hci_conn_hash_flush(hdev);
09fd0de5 690 hci_dev_unlock(hdev);
1da177e4
LT
691
692 if (hdev->flush)
693 hdev->flush(hdev);
694
8e87d142 695 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 696 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
697
698 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
699 ret = __hci_request(hdev, hci_reset_req, 0,
700 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
701
702done:
1da177e4
LT
703 hci_req_unlock(hdev);
704 hci_dev_put(hdev);
705 return ret;
706}
707
708int hci_dev_reset_stat(__u16 dev)
709{
710 struct hci_dev *hdev;
711 int ret = 0;
712
70f23020
AE
713 hdev = hci_dev_get(dev);
714 if (!hdev)
1da177e4
LT
715 return -ENODEV;
716
717 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
718
719 hci_dev_put(hdev);
720
721 return ret;
722}
723
724int hci_dev_cmd(unsigned int cmd, void __user *arg)
725{
726 struct hci_dev *hdev;
727 struct hci_dev_req dr;
728 int err = 0;
729
730 if (copy_from_user(&dr, arg, sizeof(dr)))
731 return -EFAULT;
732
70f23020
AE
733 hdev = hci_dev_get(dr.dev_id);
734 if (!hdev)
1da177e4
LT
735 return -ENODEV;
736
737 switch (cmd) {
738 case HCISETAUTH:
04837f64
MH
739 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
740 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
741 break;
742
743 case HCISETENCRYPT:
744 if (!lmp_encrypt_capable(hdev)) {
745 err = -EOPNOTSUPP;
746 break;
747 }
748
749 if (!test_bit(HCI_AUTH, &hdev->flags)) {
750 /* Auth must be enabled first */
04837f64
MH
751 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
752 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
753 if (err)
754 break;
755 }
756
04837f64
MH
757 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
758 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
759 break;
760
761 case HCISETSCAN:
04837f64
MH
762 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
763 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
764 break;
765
1da177e4 766 case HCISETLINKPOL:
e4e8e37c
MH
767 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
768 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
769 break;
770
771 case HCISETLINKMODE:
e4e8e37c
MH
772 hdev->link_mode = ((__u16) dr.dev_opt) &
773 (HCI_LM_MASTER | HCI_LM_ACCEPT);
774 break;
775
776 case HCISETPTYPE:
777 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
778 break;
779
780 case HCISETACLMTU:
e4e8e37c
MH
781 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
782 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
783 break;
784
785 case HCISETSCOMTU:
e4e8e37c
MH
786 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
787 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
788 break;
789
790 default:
791 err = -EINVAL;
792 break;
793 }
e4e8e37c 794
1da177e4
LT
795 hci_dev_put(hdev);
796 return err;
797}
798
799int hci_get_dev_list(void __user *arg)
800{
8035ded4 801 struct hci_dev *hdev;
1da177e4
LT
802 struct hci_dev_list_req *dl;
803 struct hci_dev_req *dr;
1da177e4
LT
804 int n = 0, size, err;
805 __u16 dev_num;
806
807 if (get_user(dev_num, (__u16 __user *) arg))
808 return -EFAULT;
809
810 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
811 return -EINVAL;
812
813 size = sizeof(*dl) + dev_num * sizeof(*dr);
814
70f23020
AE
815 dl = kzalloc(size, GFP_KERNEL);
816 if (!dl)
1da177e4
LT
817 return -ENOMEM;
818
819 dr = dl->dev_req;
820
821 read_lock_bh(&hci_dev_list_lock);
8035ded4 822 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 823 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 824 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
825
826 if (!test_bit(HCI_MGMT, &hdev->flags))
827 set_bit(HCI_PAIRABLE, &hdev->flags);
828
1da177e4
LT
829 (dr + n)->dev_id = hdev->id;
830 (dr + n)->dev_opt = hdev->flags;
c542a06c 831
1da177e4
LT
832 if (++n >= dev_num)
833 break;
834 }
835 read_unlock_bh(&hci_dev_list_lock);
836
837 dl->dev_num = n;
838 size = sizeof(*dl) + n * sizeof(*dr);
839
840 err = copy_to_user(arg, dl, size);
841 kfree(dl);
842
843 return err ? -EFAULT : 0;
844}
845
846int hci_get_dev_info(void __user *arg)
847{
848 struct hci_dev *hdev;
849 struct hci_dev_info di;
850 int err = 0;
851
852 if (copy_from_user(&di, arg, sizeof(di)))
853 return -EFAULT;
854
70f23020
AE
855 hdev = hci_dev_get(di.dev_id);
856 if (!hdev)
1da177e4
LT
857 return -ENODEV;
858
3243553f
JH
859 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
860 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 861
c542a06c
JH
862 if (!test_bit(HCI_MGMT, &hdev->flags))
863 set_bit(HCI_PAIRABLE, &hdev->flags);
864
1da177e4
LT
865 strcpy(di.name, hdev->name);
866 di.bdaddr = hdev->bdaddr;
943da25d 867 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
868 di.flags = hdev->flags;
869 di.pkt_type = hdev->pkt_type;
870 di.acl_mtu = hdev->acl_mtu;
871 di.acl_pkts = hdev->acl_pkts;
872 di.sco_mtu = hdev->sco_mtu;
873 di.sco_pkts = hdev->sco_pkts;
874 di.link_policy = hdev->link_policy;
875 di.link_mode = hdev->link_mode;
876
877 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
878 memcpy(&di.features, &hdev->features, sizeof(di.features));
879
880 if (copy_to_user(arg, &di, sizeof(di)))
881 err = -EFAULT;
882
883 hci_dev_put(hdev);
884
885 return err;
886}
887
888/* ---- Interface to HCI drivers ---- */
889
611b30f7
MH
890static int hci_rfkill_set_block(void *data, bool blocked)
891{
892 struct hci_dev *hdev = data;
893
894 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
895
896 if (!blocked)
897 return 0;
898
899 hci_dev_do_close(hdev);
900
901 return 0;
902}
903
904static const struct rfkill_ops hci_rfkill_ops = {
905 .set_block = hci_rfkill_set_block,
906};
907
1da177e4
LT
908/* Alloc HCI device */
909struct hci_dev *hci_alloc_dev(void)
910{
911 struct hci_dev *hdev;
912
25ea6db0 913 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
914 if (!hdev)
915 return NULL;
916
0ac7e700 917 hci_init_sysfs(hdev);
1da177e4
LT
918 skb_queue_head_init(&hdev->driver_init);
919
920 return hdev;
921}
922EXPORT_SYMBOL(hci_alloc_dev);
923
924/* Free HCI device */
925void hci_free_dev(struct hci_dev *hdev)
926{
927 skb_queue_purge(&hdev->driver_init);
928
a91f2e39
MH
929 /* will free via device release */
930 put_device(&hdev->dev);
1da177e4
LT
931}
932EXPORT_SYMBOL(hci_free_dev);
933
ab81cbf9
JH
934static void hci_power_on(struct work_struct *work)
935{
936 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
937
938 BT_DBG("%s", hdev->name);
939
940 if (hci_dev_open(hdev->id) < 0)
941 return;
942
943 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
80b7ab33 944 schedule_delayed_work(&hdev->power_off,
3243553f 945 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
946
947 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 948 mgmt_index_added(hdev);
ab81cbf9
JH
949}
950
951static void hci_power_off(struct work_struct *work)
952{
3243553f
JH
953 struct hci_dev *hdev = container_of(work, struct hci_dev,
954 power_off.work);
ab81cbf9
JH
955
956 BT_DBG("%s", hdev->name);
957
958 clear_bit(HCI_AUTO_OFF, &hdev->flags);
959
3243553f 960 hci_dev_close(hdev->id);
ab81cbf9
JH
961}
962
16ab91ab
JH
963static void hci_discov_off(struct work_struct *work)
964{
965 struct hci_dev *hdev;
966 u8 scan = SCAN_PAGE;
967
968 hdev = container_of(work, struct hci_dev, discov_off.work);
969
970 BT_DBG("%s", hdev->name);
971
09fd0de5 972 hci_dev_lock(hdev);
16ab91ab
JH
973
974 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
975
976 hdev->discov_timeout = 0;
977
09fd0de5 978 hci_dev_unlock(hdev);
16ab91ab
JH
979}
980
2aeb9a1a
JH
981int hci_uuids_clear(struct hci_dev *hdev)
982{
983 struct list_head *p, *n;
984
985 list_for_each_safe(p, n, &hdev->uuids) {
986 struct bt_uuid *uuid;
987
988 uuid = list_entry(p, struct bt_uuid, list);
989
990 list_del(p);
991 kfree(uuid);
992 }
993
994 return 0;
995}
996
55ed8ca1
JH
997int hci_link_keys_clear(struct hci_dev *hdev)
998{
999 struct list_head *p, *n;
1000
1001 list_for_each_safe(p, n, &hdev->link_keys) {
1002 struct link_key *key;
1003
1004 key = list_entry(p, struct link_key, list);
1005
1006 list_del(p);
1007 kfree(key);
1008 }
1009
1010 return 0;
1011}
1012
1013struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1014{
8035ded4 1015 struct link_key *k;
55ed8ca1 1016
8035ded4 1017 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1019 return k;
55ed8ca1
JH
1020
1021 return NULL;
1022}
1023
d25e28ab
JH
1024static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1025 u8 key_type, u8 old_key_type)
1026{
1027 /* Legacy key */
1028 if (key_type < 0x03)
1029 return 1;
1030
1031 /* Debug keys are insecure so don't store them persistently */
1032 if (key_type == HCI_LK_DEBUG_COMBINATION)
1033 return 0;
1034
1035 /* Changed combination key and there's no previous one */
1036 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1037 return 0;
1038
1039 /* Security mode 3 case */
1040 if (!conn)
1041 return 1;
1042
1043 /* Neither local nor remote side had no-bonding as requirement */
1044 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1045 return 1;
1046
1047 /* Local side had dedicated bonding as requirement */
1048 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1049 return 1;
1050
1051 /* Remote side had dedicated bonding as requirement */
1052 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1053 return 1;
1054
1055 /* If none of the above criteria match, then don't store the key
1056 * persistently */
1057 return 0;
1058}
1059
75d262c2
VCG
1060struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1061{
1062 struct link_key *k;
1063
1064 list_for_each_entry(k, &hdev->link_keys, list) {
1065 struct key_master_id *id;
1066
1067 if (k->type != HCI_LK_SMP_LTK)
1068 continue;
1069
1070 if (k->dlen != sizeof(*id))
1071 continue;
1072
1073 id = (void *) &k->data;
1074 if (id->ediv == ediv &&
1075 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1076 return k;
1077 }
1078
1079 return NULL;
1080}
1081EXPORT_SYMBOL(hci_find_ltk);
1082
1083struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1084 bdaddr_t *bdaddr, u8 type)
1085{
1086 struct link_key *k;
1087
1088 list_for_each_entry(k, &hdev->link_keys, list)
1089 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1090 return k;
1091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
d25e28ab
JH
1096int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1097 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1098{
1099 struct link_key *key, *old_key;
4df378a1 1100 u8 old_key_type, persistent;
55ed8ca1
JH
1101
1102 old_key = hci_find_link_key(hdev, bdaddr);
1103 if (old_key) {
1104 old_key_type = old_key->type;
1105 key = old_key;
1106 } else {
12adcf3a 1107 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1108 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1109 if (!key)
1110 return -ENOMEM;
1111 list_add(&key->list, &hdev->link_keys);
1112 }
1113
1114 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1115
d25e28ab
JH
1116 /* Some buggy controller combinations generate a changed
1117 * combination key for legacy pairing even when there's no
1118 * previous key */
1119 if (type == HCI_LK_CHANGED_COMBINATION &&
1120 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1121 old_key_type == 0xff) {
d25e28ab 1122 type = HCI_LK_COMBINATION;
655fe6ec
JH
1123 if (conn)
1124 conn->key_type = type;
1125 }
d25e28ab 1126
55ed8ca1
JH
1127 bacpy(&key->bdaddr, bdaddr);
1128 memcpy(key->val, val, 16);
55ed8ca1
JH
1129 key->pin_len = pin_len;
1130
b6020ba0 1131 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1132 key->type = old_key_type;
4748fed2
JH
1133 else
1134 key->type = type;
1135
4df378a1
JH
1136 if (!new_key)
1137 return 0;
1138
1139 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1140
744cf19e 1141 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1142
1143 if (!persistent) {
1144 list_del(&key->list);
1145 kfree(key);
1146 }
55ed8ca1
JH
1147
1148 return 0;
1149}
1150
75d262c2 1151int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1152 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1153{
1154 struct link_key *key, *old_key;
1155 struct key_master_id *id;
1156 u8 old_key_type;
1157
1158 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1159
1160 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1161 if (old_key) {
1162 key = old_key;
1163 old_key_type = old_key->type;
1164 } else {
1165 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1166 if (!key)
1167 return -ENOMEM;
1168 list_add(&key->list, &hdev->link_keys);
1169 old_key_type = 0xff;
1170 }
1171
1172 key->dlen = sizeof(*id);
1173
1174 bacpy(&key->bdaddr, bdaddr);
1175 memcpy(key->val, ltk, sizeof(key->val));
1176 key->type = HCI_LK_SMP_LTK;
726b4ffc 1177 key->pin_len = key_size;
75d262c2
VCG
1178
1179 id = (void *) &key->data;
1180 id->ediv = ediv;
1181 memcpy(id->rand, rand, sizeof(id->rand));
1182
1183 if (new_key)
744cf19e 1184 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1185
1186 return 0;
1187}
1188
55ed8ca1
JH
1189int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1190{
1191 struct link_key *key;
1192
1193 key = hci_find_link_key(hdev, bdaddr);
1194 if (!key)
1195 return -ENOENT;
1196
1197 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1198
1199 list_del(&key->list);
1200 kfree(key);
1201
1202 return 0;
1203}
1204
6bd32326
VT
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never acknowledged the last command: restore a
	 * single command credit and kick the command worker so the queue
	 * can drain again. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1214
2763eda6
SJ
1215struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1216 bdaddr_t *bdaddr)
1217{
1218 struct oob_data *data;
1219
1220 list_for_each_entry(data, &hdev->remote_oob_data, list)
1221 if (bacmp(bdaddr, &data->bdaddr) == 0)
1222 return data;
1223
1224 return NULL;
1225}
1226
1227int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1228{
1229 struct oob_data *data;
1230
1231 data = hci_find_remote_oob_data(hdev, bdaddr);
1232 if (!data)
1233 return -ENOENT;
1234
1235 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1236
1237 list_del(&data->list);
1238 kfree(data);
1239
1240 return 0;
1241}
1242
1243int hci_remote_oob_data_clear(struct hci_dev *hdev)
1244{
1245 struct oob_data *data, *n;
1246
1247 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1248 list_del(&data->list);
1249 kfree(data);
1250 }
1251
1252 return 0;
1253}
1254
/* Store remote OOB pairing data (hash + randomizer) for bdaddr.
 * An existing entry for the address is updated in place; otherwise a
 * new one is allocated.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1278
b2a66aad
AJ
1279struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1280 bdaddr_t *bdaddr)
1281{
8035ded4 1282 struct bdaddr_list *b;
b2a66aad 1283
8035ded4 1284 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1285 if (bacmp(bdaddr, &b->bdaddr) == 0)
1286 return b;
b2a66aad
AJ
1287
1288 return NULL;
1289}
1290
1291int hci_blacklist_clear(struct hci_dev *hdev)
1292{
1293 struct list_head *p, *n;
1294
1295 list_for_each_safe(p, n, &hdev->blacklist) {
1296 struct bdaddr_list *b;
1297
1298 b = list_entry(p, struct bdaddr_list, list);
1299
1300 list_del(p);
1301 kfree(b);
1302 }
1303
1304 return 0;
1305}
1306
/* Add bdaddr to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on
 * allocation failure, otherwise the mgmt_device_blocked() result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address is reserved (hci_blacklist_del uses it to
	 * mean "clear all"), so it may not be blacklisted itself. */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1327
/* Remove bdaddr from the blacklist and notify mgmt.
 * BDADDR_ANY clears the whole list.  Returns -ENOENT when the address
 * is not blacklisted, otherwise the mgmt_device_unblocked() result.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1344
db323f2f 1345static void hci_clear_adv_cache(struct work_struct *work)
35815085 1346{
db323f2f
GP
1347 struct hci_dev *hdev = container_of(work, struct hci_dev,
1348 adv_work.work);
35815085
AG
1349
1350 hci_dev_lock(hdev);
1351
1352 hci_adv_entries_clear(hdev);
1353
1354 hci_dev_unlock(hdev);
1355}
1356
76c8686f
AG
1357int hci_adv_entries_clear(struct hci_dev *hdev)
1358{
1359 struct adv_entry *entry, *tmp;
1360
1361 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1362 list_del(&entry->list);
1363 kfree(entry);
1364 }
1365
1366 BT_DBG("%s adv cache cleared", hdev->name);
1367
1368 return 0;
1369}
1370
1371struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372{
1373 struct adv_entry *entry;
1374
1375 list_for_each_entry(entry, &hdev->adv_entries, list)
1376 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1377 return entry;
1378
1379 return NULL;
1380}
1381
1382static inline int is_connectable_adv(u8 evt_type)
1383{
1384 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1385 return 1;
1386
1387 return 0;
1388}
1389
/* Cache a connectable LE advertising report.
 * Non-connectable event types are rejected with -EINVAL; an address
 * already in the cache is silently ignored.  Returns 0 or -ENOMEM.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1417
1da177e4
LT
/* Register HCI device.
 *
 * Allocates the lowest free device id (AMP controllers start at 1 so
 * index 0 can double as the AMP controller ID), initialises all per-
 * device state, creates the workqueue and sysfs/rfkill hooks, and
 * schedules the initial power-on.  Returns the assigned id, or a
 * negative errno with the device unlinked again on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Single-threaded, high-priority workqueue: rx/tx/cmd work for one
	 * controller must not run concurrently with each other. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1545
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device ever finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device caches under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1597
/* Suspend HCI device: only broadcasts the event to notifier listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1605
/* Resume HCI device: only broadcasts the event to notifier listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1613
76bca880
MH
/* Receive frame from HCI drivers.
 * skb->dev must point at the owning hci_dev.  Frames arriving while the
 * device is neither UP nor in INIT are dropped with -ENXIO.  Accepted
 * frames are timestamped, queued on rx_q and the rx worker is kicked.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1636
33e882a5 1637static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1638 int count, __u8 index)
33e882a5
SS
1639{
1640 int len = 0;
1641 int hlen = 0;
1642 int remain = count;
1643 struct sk_buff *skb;
1644 struct bt_skb_cb *scb;
1645
1646 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1647 index >= NUM_REASSEMBLY)
1648 return -EILSEQ;
1649
1650 skb = hdev->reassembly[index];
1651
1652 if (!skb) {
1653 switch (type) {
1654 case HCI_ACLDATA_PKT:
1655 len = HCI_MAX_FRAME_SIZE;
1656 hlen = HCI_ACL_HDR_SIZE;
1657 break;
1658 case HCI_EVENT_PKT:
1659 len = HCI_MAX_EVENT_SIZE;
1660 hlen = HCI_EVENT_HDR_SIZE;
1661 break;
1662 case HCI_SCODATA_PKT:
1663 len = HCI_MAX_SCO_SIZE;
1664 hlen = HCI_SCO_HDR_SIZE;
1665 break;
1666 }
1667
1e429f38 1668 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1669 if (!skb)
1670 return -ENOMEM;
1671
1672 scb = (void *) skb->cb;
1673 scb->expect = hlen;
1674 scb->pkt_type = type;
1675
1676 skb->dev = (void *) hdev;
1677 hdev->reassembly[index] = skb;
1678 }
1679
1680 while (count) {
1681 scb = (void *) skb->cb;
1682 len = min(scb->expect, (__u16)count);
1683
1684 memcpy(skb_put(skb, len), data, len);
1685
1686 count -= len;
1687 data += len;
1688 scb->expect -= len;
1689 remain = count;
1690
1691 switch (type) {
1692 case HCI_EVENT_PKT:
1693 if (skb->len == HCI_EVENT_HDR_SIZE) {
1694 struct hci_event_hdr *h = hci_event_hdr(skb);
1695 scb->expect = h->plen;
1696
1697 if (skb_tailroom(skb) < scb->expect) {
1698 kfree_skb(skb);
1699 hdev->reassembly[index] = NULL;
1700 return -ENOMEM;
1701 }
1702 }
1703 break;
1704
1705 case HCI_ACLDATA_PKT:
1706 if (skb->len == HCI_ACL_HDR_SIZE) {
1707 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1708 scb->expect = __le16_to_cpu(h->dlen);
1709
1710 if (skb_tailroom(skb) < scb->expect) {
1711 kfree_skb(skb);
1712 hdev->reassembly[index] = NULL;
1713 return -ENOMEM;
1714 }
1715 }
1716 break;
1717
1718 case HCI_SCODATA_PKT:
1719 if (skb->len == HCI_SCO_HDR_SIZE) {
1720 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1721 scb->expect = h->dlen;
1722
1723 if (skb_tailroom(skb) < scb->expect) {
1724 kfree_skb(skb);
1725 hdev->reassembly[index] = NULL;
1726 return -ENOMEM;
1727 }
1728 }
1729 break;
1730 }
1731
1732 if (scb->expect == 0) {
1733 /* Complete frame */
1734
1735 bt_cb(skb)->pkt_type = type;
1736 hci_recv_frame(skb);
1737
1738 hdev->reassembly[index] = NULL;
1739 return remain;
1740 }
1741 }
1742
1743 return remain;
1744}
1745
ef222013
MH
/* Feed a driver-provided fragment of a typed HCI packet into the
 * per-type reassembly slot (slot index = type - 1), looping until all
 * bytes are consumed.  Returns the final hci_reassembly() result.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* hci_reassembly returned the unconsumed tail */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1765
99811510
SS
/* Dedicated reassembly slot for untyped byte-stream input (e.g. UART
 * transports where the packet-type indicator is in-band). */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the stream reassembly slot.  The first
 * byte of each packet is its HCI packet-type indicator; subsequent
 * bytes are reassembled by hci_reassembly() until the frame completes.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1800
1da177e4
LT
1801/* ---- Interface to upper protocols ---- */
1802
/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	mutex_lock(&hci_task_lock);

	/* Each protocol slot can only be claimed once */
	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	mutex_unlock(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1826
/* Unregister an upper-layer protocol; counterpart of
 * hci_register_proto().  Returns -ENOENT when the slot was empty. */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	mutex_lock(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	mutex_unlock(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1848
/* Add a callback structure to the global HCI callback list. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1860
/* Remove a callback structure from the global HCI callback list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1872
/* Hand a fully built frame to the driver's send callback.
 * In promiscuous mode a timestamped copy is also delivered to raw HCI
 * sockets before the skb is orphaned and sent.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1896
/* Send HCI command: build a command packet (header + optional
 * parameters), queue it on cmd_q and kick the command worker.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command sent during init so the init state
	 * machine can track progress. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
1932
/* Get data from the previously sent command: returns a pointer to the
 * parameter payload of the last sent command if its opcode matches,
 * otherwise NULL. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1950
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1963
/* Queue an ACL skb (and any frag_list continuation fragments) on the
 * given channel queue.  Fragments get ACL_CONT headers and the whole
 * chain is enqueued atomically under the queue lock so the scheduler
 * never sees a partial packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2004
/* Send ACL data on a channel: stamp the first fragment with the ACL
 * header, queue the packet on the channel and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2021
/* Send SCO data */
/* Prepend the SCO header, queue the packet on the connection and kick
 * the TX worker. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2044
2045/* ---- HCI TX task (outgoing data) ---- */
2046
/* HCI Connection scheduler */
/* Pick the connection of the given link type with the fewest in-flight
 * packets (fairness) and compute its send quota from the controller's
 * remaining buffer credits.  *quote is 0 when nothing is sendable.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares the ACL buffer pool when the controller
			 * reports no dedicated LE buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2106
bae1f5d9 2107static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2108{
2109 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2110 struct hci_conn *c;
1da177e4 2111
bae1f5d9 2112 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2113
bf4c6325
GP
2114 rcu_read_lock();
2115
1da177e4 2116 /* Kill stalled connections */
bf4c6325 2117 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2118 if (c->type == type && c->sent) {
2119 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2120 hdev->name, batostr(&c->dst));
2121 hci_acl_disconn(c, 0x13);
2122 }
2123 }
bf4c6325
GP
2124
2125 rcu_read_unlock();
1da177e4
LT
2126}
2127
/* Channel-level scheduler: among all channels of connections of the
 * given link type, select the one whose head skb has the highest
 * priority, breaking ties by fewest in-flight packets on its
 * connection.  Computes a send quota from the controller credits.
 * Returns NULL when no channel has queued data.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness
			 * accounting at this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2206
/* Anti-starvation pass run after a scheduling round: channels that sent
 * nothing this round get the priority of their head skb promoted to
 * HCI_PRIO_MAX - 1 so they will be picked next time; channels that did
 * send simply have their per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2256
/* Schedule ACL traffic: detect stalled links, then repeatedly pick the
 * best channel and drain same-or-higher priority packets up to its
 * quota while controller ACL credits remain.  Recalculates channel
 * priorities afterwards if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2306
/* Schedule SCO */
/* Round-robin over SCO connections, sending up to each connection's
 * quota while SCO credits remain.  The sent counter wraps at ~0. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2330
/* Schedule eSCO traffic; identical scheme to hci_sched_sco but for
 * ESCO_LINK connections (they share the SCO credit pool). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2353
/* Schedule LE traffic: same channel-priority scheme as hci_sched_acl.
 * Controllers without dedicated LE buffers (le_pkts == 0) borrow from
 * the ACL credit pool, so the remaining count is written back to
 * whichever pool was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2404
3eff45ea 2405static void hci_tx_work(struct work_struct *work)
1da177e4 2406{
3eff45ea 2407 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2408 struct sk_buff *skb;
2409
67d0dfb5 2410 mutex_lock(&hci_task_lock);
1da177e4 2411
6ed58ec5
VT
2412 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2413 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2414
2415 /* Schedule queues and send stuff to HCI driver */
2416
2417 hci_sched_acl(hdev);
2418
2419 hci_sched_sco(hdev);
2420
b6a0dc82
MH
2421 hci_sched_esco(hdev);
2422
6ed58ec5
VT
2423 hci_sched_le(hdev);
2424
1da177e4
LT
2425 /* Send next queued raw (unknown type) packet */
2426 while ((skb = skb_dequeue(&hdev->raw_q)))
2427 hci_send_frame(skb);
2428
67d0dfb5 2429 mutex_unlock(&hci_task_lock);
1da177e4
LT
2430}
2431
25985edc 2432/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2433
2434/* ACL data packet */
2435static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2436{
2437 struct hci_acl_hdr *hdr = (void *) skb->data;
2438 struct hci_conn *conn;
2439 __u16 handle, flags;
2440
2441 skb_pull(skb, HCI_ACL_HDR_SIZE);
2442
2443 handle = __le16_to_cpu(hdr->handle);
2444 flags = hci_flags(handle);
2445 handle = hci_handle(handle);
2446
2447 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2448
2449 hdev->stat.acl_rx++;
2450
2451 hci_dev_lock(hdev);
2452 conn = hci_conn_hash_lookup_handle(hdev, handle);
2453 hci_dev_unlock(hdev);
8e87d142 2454
1da177e4
LT
2455 if (conn) {
2456 register struct hci_proto *hp;
2457
65983fc7 2458 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2459
1da177e4 2460 /* Send to upper protocol */
70f23020
AE
2461 hp = hci_proto[HCI_PROTO_L2CAP];
2462 if (hp && hp->recv_acldata) {
1da177e4
LT
2463 hp->recv_acldata(conn, skb, flags);
2464 return;
2465 }
2466 } else {
8e87d142 2467 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2468 hdev->name, handle);
2469 }
2470
2471 kfree_skb(skb);
2472}
2473
2474/* SCO data packet */
2475static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2476{
2477 struct hci_sco_hdr *hdr = (void *) skb->data;
2478 struct hci_conn *conn;
2479 __u16 handle;
2480
2481 skb_pull(skb, HCI_SCO_HDR_SIZE);
2482
2483 handle = __le16_to_cpu(hdr->handle);
2484
2485 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2486
2487 hdev->stat.sco_rx++;
2488
2489 hci_dev_lock(hdev);
2490 conn = hci_conn_hash_lookup_handle(hdev, handle);
2491 hci_dev_unlock(hdev);
2492
2493 if (conn) {
2494 register struct hci_proto *hp;
2495
2496 /* Send to upper protocol */
70f23020
AE
2497 hp = hci_proto[HCI_PROTO_SCO];
2498 if (hp && hp->recv_scodata) {
1da177e4
LT
2499 hp->recv_scodata(conn, skb);
2500 return;
2501 }
2502 } else {
8e87d142 2503 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2504 hdev->name, handle);
2505 }
2506
2507 kfree_skb(skb);
2508}
2509
b78752cc 2510static void hci_rx_work(struct work_struct *work)
1da177e4 2511{
b78752cc 2512 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2513 struct sk_buff *skb;
2514
2515 BT_DBG("%s", hdev->name);
2516
67d0dfb5 2517 mutex_lock(&hci_task_lock);
1da177e4
LT
2518
2519 while ((skb = skb_dequeue(&hdev->rx_q))) {
2520 if (atomic_read(&hdev->promisc)) {
2521 /* Send copy to the sockets */
eec8d2bc 2522 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2523 }
2524
2525 if (test_bit(HCI_RAW, &hdev->flags)) {
2526 kfree_skb(skb);
2527 continue;
2528 }
2529
2530 if (test_bit(HCI_INIT, &hdev->flags)) {
2531 /* Don't process data packets in this states. */
0d48d939 2532 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2533 case HCI_ACLDATA_PKT:
2534 case HCI_SCODATA_PKT:
2535 kfree_skb(skb);
2536 continue;
3ff50b79 2537 }
1da177e4
LT
2538 }
2539
2540 /* Process frame */
0d48d939 2541 switch (bt_cb(skb)->pkt_type) {
1da177e4 2542 case HCI_EVENT_PKT:
b78752cc 2543 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2544 hci_event_packet(hdev, skb);
2545 break;
2546
2547 case HCI_ACLDATA_PKT:
2548 BT_DBG("%s ACL data packet", hdev->name);
2549 hci_acldata_packet(hdev, skb);
2550 break;
2551
2552 case HCI_SCODATA_PKT:
2553 BT_DBG("%s SCO data packet", hdev->name);
2554 hci_scodata_packet(hdev, skb);
2555 break;
2556
2557 default:
2558 kfree_skb(skb);
2559 break;
2560 }
2561 }
2562
67d0dfb5 2563 mutex_unlock(&hci_task_lock);
1da177e4
LT
2564}
2565
c347b765 2566static void hci_cmd_work(struct work_struct *work)
1da177e4 2567{
c347b765 2568 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2569 struct sk_buff *skb;
2570
2571 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2572
1da177e4 2573 /* Send queued commands */
5a08ecce
AE
2574 if (atomic_read(&hdev->cmd_cnt)) {
2575 skb = skb_dequeue(&hdev->cmd_q);
2576 if (!skb)
2577 return;
2578
7585b97a 2579 kfree_skb(hdev->sent_cmd);
1da177e4 2580
70f23020
AE
2581 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2582 if (hdev->sent_cmd) {
1da177e4
LT
2583 atomic_dec(&hdev->cmd_cnt);
2584 hci_send_frame(skb);
7bdb8a5c
SJ
2585 if (test_bit(HCI_RESET, &hdev->flags))
2586 del_timer(&hdev->cmd_timer);
2587 else
2588 mod_timer(&hdev->cmd_timer,
6bd32326 2589 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2590 } else {
2591 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2592 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2593 }
2594 }
2595}
2519a1fc
AG
2596
2597int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2598{
2599 /* General inquiry access code (GIAC) */
2600 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2601 struct hci_cp_inquiry cp;
2602
2603 BT_DBG("%s", hdev->name);
2604
2605 if (test_bit(HCI_INQUIRY, &hdev->flags))
2606 return -EINPROGRESS;
2607
2608 memset(&cp, 0, sizeof(cp));
2609 memcpy(&cp.lap, lap, sizeof(cp.lap));
2610 cp.length = length;
2611
2612 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2613}
023d5049
AG
2614
2615int hci_cancel_inquiry(struct hci_dev *hdev)
2616{
2617 BT_DBG("%s", hdev->name);
2618
2619 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2620 return -EPERM;
2621
2622 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2623}
7784d78f
AE
2624
/* Module parameter: opt-in switch for Bluetooth High Speed support,
 * writable at runtime via sysfs (mode 0644). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");