Bluetooth: Add initial mgmt_confirm_name support
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
7784d78f
AE
58int enable_hs;
59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
e041c683 79 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
85}
86
6516455d 87static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 88{
e041c683 89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
90}
91
92/* ---- HCI requests ---- */
93
23bb5763 94void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 95{
23bb5763
JH
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
a5040efa
JH
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 102 return;
1da177e4
LT
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
8e87d142 123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 124 unsigned long opt, __u32 timeout)
1da177e4
LT
125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
e175072f 146 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
3ff50b79 156 }
1da177e4 157
a5040efa 158 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 166 unsigned long opt, __u32 timeout)
1da177e4
LT
167{
168 int ret;
169
7c6a329e
MH
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
1da177e4
LT
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
f630cf0d 186 set_bit(HCI_RESET, &hdev->flags);
a9de9248 187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
188}
189
e61ef499 190static void bredr_init(struct hci_dev *hdev)
1da177e4 191{
b0916ea0 192 struct hci_cp_delete_stored_link_key cp;
1ebb9252 193 __le16 param;
89f2783d 194 __u8 flt_type;
1da177e4 195
2455a3ea
AE
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
1da177e4
LT
198 /* Mandatory initialization */
199
200 /* Reset */
f630cf0d 201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 204 }
1da177e4
LT
205
206 /* Read Local Supported Features */
a9de9248 207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 208
1143e5a6 209 /* Read Local Version */
a9de9248 210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 211
1da177e4 212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 214
1da177e4 215 /* Read BD Address */
a9de9248
MH
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
223
224 /* Read Voice Setting */
a9de9248 225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
89f2783d 230 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 232
1da177e4 233 /* Connection accept timeout ~20 secs */
aca3192c 234 param = cpu_to_le16(0x7d00);
a9de9248 235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
240}
241
e61ef499
AE
242static void amp_init(struct hci_dev *hdev)
243{
2455a3ea
AE
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
e61ef499
AE
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
6ed58ec5
VT
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
1da177e4
LT
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
a9de9248 302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
a9de9248 312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
e4e8e37c 321 /* Encryption */
a9de9248 322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
323}
324
e4e8e37c
MH
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
a418b893 329 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
561aafbc 360 struct inquiry_cache *cache = &hdev->inq_cache;
b57c1a56 361 struct inquiry_entry *p, *n;
1da177e4 362
561aafbc
JH
363 list_for_each_entry_safe(p, n, &cache->all, all) {
364 list_del(&p->all);
b57c1a56 365 kfree(p);
1da177e4 366 }
561aafbc
JH
367
368 INIT_LIST_HEAD(&cache->unknown);
369 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
370}
371
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
561aafbc
JH
379 list_for_each_entry(e, &cache->all, all) {
380 if (!bacmp(&e->data.bdaddr, bdaddr))
381 return e;
382 }
383
384 return NULL;
385}
386
387struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
388 bdaddr_t *bdaddr)
389{
390 struct inquiry_cache *cache = &hdev->inq_cache;
391 struct inquiry_entry *e;
392
393 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
394
395 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 396 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
397 return e;
398 }
399
400 return NULL;
1da177e4
LT
401}
402
561aafbc
JH
403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
404 bool name_known)
1da177e4
LT
405{
406 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 407 struct inquiry_entry *ie;
1da177e4
LT
408
409 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
410
70f23020 411 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
561aafbc
JH
412 if (ie)
413 goto update;
414
415 /* Entry not in the cache. Add new one. */
416 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
417 if (!ie)
418 return;
419
420 list_add(&ie->all, &cache->all);
421
422 if (name_known) {
423 ie->name_state = NAME_KNOWN;
424 } else {
425 ie->name_state = NAME_NOT_KNOWN;
426 list_add(&ie->list, &cache->unknown);
427 }
70f23020 428
561aafbc
JH
429update:
430 if (name_known && ie->name_state != NAME_KNOWN &&
431 ie->name_state != NAME_PENDING) {
432 ie->name_state = NAME_KNOWN;
433 list_del(&ie->list);
1da177e4
LT
434 }
435
70f23020
AE
436 memcpy(&ie->data, data, sizeof(*data));
437 ie->timestamp = jiffies;
1da177e4
LT
438 cache->timestamp = jiffies;
439}
440
441static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
442{
443 struct inquiry_cache *cache = &hdev->inq_cache;
444 struct inquiry_info *info = (struct inquiry_info *) buf;
445 struct inquiry_entry *e;
446 int copied = 0;
447
561aafbc 448 list_for_each_entry(e, &cache->all, all) {
1da177e4 449 struct inquiry_data *data = &e->data;
b57c1a56
JH
450
451 if (copied >= num)
452 break;
453
1da177e4
LT
454 bacpy(&info->bdaddr, &data->bdaddr);
455 info->pscan_rep_mode = data->pscan_rep_mode;
456 info->pscan_period_mode = data->pscan_period_mode;
457 info->pscan_mode = data->pscan_mode;
458 memcpy(info->dev_class, data->dev_class, 3);
459 info->clock_offset = data->clock_offset;
b57c1a56 460
1da177e4 461 info++;
b57c1a56 462 copied++;
1da177e4
LT
463 }
464
465 BT_DBG("cache %p, copied %d", cache, copied);
466 return copied;
467}
468
469static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
470{
471 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
472 struct hci_cp_inquiry cp;
473
474 BT_DBG("%s", hdev->name);
475
476 if (test_bit(HCI_INQUIRY, &hdev->flags))
477 return;
478
479 /* Start Inquiry */
480 memcpy(&cp.lap, &ir->lap, 3);
481 cp.length = ir->length;
482 cp.num_rsp = ir->num_rsp;
a9de9248 483 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
484}
485
486int hci_inquiry(void __user *arg)
487{
488 __u8 __user *ptr = arg;
489 struct hci_inquiry_req ir;
490 struct hci_dev *hdev;
491 int err = 0, do_inquiry = 0, max_rsp;
492 long timeo;
493 __u8 *buf;
494
495 if (copy_from_user(&ir, ptr, sizeof(ir)))
496 return -EFAULT;
497
5a08ecce
AE
498 hdev = hci_dev_get(ir.dev_id);
499 if (!hdev)
1da177e4
LT
500 return -ENODEV;
501
09fd0de5 502 hci_dev_lock(hdev);
8e87d142 503 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
504 inquiry_cache_empty(hdev) ||
505 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
506 inquiry_cache_flush(hdev);
507 do_inquiry = 1;
508 }
09fd0de5 509 hci_dev_unlock(hdev);
1da177e4 510
04837f64 511 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
512
513 if (do_inquiry) {
514 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
515 if (err < 0)
516 goto done;
517 }
1da177e4
LT
518
519 /* for unlimited number of responses we will use buffer with 255 entries */
520 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
521
522 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
523 * copy it to the user space.
524 */
01df8c31 525 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 526 if (!buf) {
1da177e4
LT
527 err = -ENOMEM;
528 goto done;
529 }
530
09fd0de5 531 hci_dev_lock(hdev);
1da177e4 532 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 533 hci_dev_unlock(hdev);
1da177e4
LT
534
535 BT_DBG("num_rsp %d", ir.num_rsp);
536
537 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
538 ptr += sizeof(ir);
539 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
540 ir.num_rsp))
541 err = -EFAULT;
8e87d142 542 } else
1da177e4
LT
543 err = -EFAULT;
544
545 kfree(buf);
546
547done:
548 hci_dev_put(hdev);
549 return err;
550}
551
552/* ---- HCI ioctl helpers ---- */
553
554int hci_dev_open(__u16 dev)
555{
556 struct hci_dev *hdev;
557 int ret = 0;
558
5a08ecce
AE
559 hdev = hci_dev_get(dev);
560 if (!hdev)
1da177e4
LT
561 return -ENODEV;
562
563 BT_DBG("%s %p", hdev->name, hdev);
564
565 hci_req_lock(hdev);
566
611b30f7
MH
567 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
568 ret = -ERFKILL;
569 goto done;
570 }
571
1da177e4
LT
572 if (test_bit(HCI_UP, &hdev->flags)) {
573 ret = -EALREADY;
574 goto done;
575 }
576
577 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
578 set_bit(HCI_RAW, &hdev->flags);
579
07e3b94a
AE
580 /* Treat all non BR/EDR controllers as raw devices if
581 enable_hs is not set */
582 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
583 set_bit(HCI_RAW, &hdev->flags);
584
1da177e4
LT
585 if (hdev->open(hdev)) {
586 ret = -EIO;
587 goto done;
588 }
589
590 if (!test_bit(HCI_RAW, &hdev->flags)) {
591 atomic_set(&hdev->cmd_cnt, 1);
592 set_bit(HCI_INIT, &hdev->flags);
a5040efa 593 hdev->init_last_cmd = 0;
1da177e4 594
04837f64
MH
595 ret = __hci_request(hdev, hci_init_req, 0,
596 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 597
eead27da 598 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
599 ret = __hci_request(hdev, hci_le_init_req, 0,
600 msecs_to_jiffies(HCI_INIT_TIMEOUT));
601
1da177e4
LT
602 clear_bit(HCI_INIT, &hdev->flags);
603 }
604
605 if (!ret) {
606 hci_dev_hold(hdev);
607 set_bit(HCI_UP, &hdev->flags);
608 hci_notify(hdev, HCI_DEV_UP);
56e5cb86 609 if (!test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 610 hci_dev_lock(hdev);
744cf19e 611 mgmt_powered(hdev, 1);
09fd0de5 612 hci_dev_unlock(hdev);
56e5cb86 613 }
8e87d142 614 } else {
1da177e4 615 /* Init failed, cleanup */
3eff45ea 616 flush_work(&hdev->tx_work);
c347b765 617 flush_work(&hdev->cmd_work);
b78752cc 618 flush_work(&hdev->rx_work);
1da177e4
LT
619
620 skb_queue_purge(&hdev->cmd_q);
621 skb_queue_purge(&hdev->rx_q);
622
623 if (hdev->flush)
624 hdev->flush(hdev);
625
626 if (hdev->sent_cmd) {
627 kfree_skb(hdev->sent_cmd);
628 hdev->sent_cmd = NULL;
629 }
630
631 hdev->close(hdev);
632 hdev->flags = 0;
633 }
634
635done:
636 hci_req_unlock(hdev);
637 hci_dev_put(hdev);
638 return ret;
639}
640
641static int hci_dev_do_close(struct hci_dev *hdev)
642{
643 BT_DBG("%s %p", hdev->name, hdev);
644
645 hci_req_cancel(hdev, ENODEV);
646 hci_req_lock(hdev);
647
648 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 649 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
650 hci_req_unlock(hdev);
651 return 0;
652 }
653
3eff45ea
GP
654 /* Flush RX and TX works */
655 flush_work(&hdev->tx_work);
b78752cc 656 flush_work(&hdev->rx_work);
1da177e4 657
16ab91ab 658 if (hdev->discov_timeout > 0) {
e0f9309f 659 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
660 hdev->discov_timeout = 0;
661 }
662
3243553f 663 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 664 cancel_delayed_work(&hdev->power_off);
3243553f 665
7d78525d
JH
666 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
667 cancel_delayed_work(&hdev->service_cache);
668
09fd0de5 669 hci_dev_lock(hdev);
1da177e4
LT
670 inquiry_cache_flush(hdev);
671 hci_conn_hash_flush(hdev);
09fd0de5 672 hci_dev_unlock(hdev);
1da177e4
LT
673
674 hci_notify(hdev, HCI_DEV_DOWN);
675
676 if (hdev->flush)
677 hdev->flush(hdev);
678
679 /* Reset device */
680 skb_queue_purge(&hdev->cmd_q);
681 atomic_set(&hdev->cmd_cnt, 1);
682 if (!test_bit(HCI_RAW, &hdev->flags)) {
683 set_bit(HCI_INIT, &hdev->flags);
04837f64 684 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 685 msecs_to_jiffies(250));
1da177e4
LT
686 clear_bit(HCI_INIT, &hdev->flags);
687 }
688
c347b765
GP
689 /* flush cmd work */
690 flush_work(&hdev->cmd_work);
1da177e4
LT
691
692 /* Drop queues */
693 skb_queue_purge(&hdev->rx_q);
694 skb_queue_purge(&hdev->cmd_q);
695 skb_queue_purge(&hdev->raw_q);
696
697 /* Drop last sent command */
698 if (hdev->sent_cmd) {
b79f44c1 699 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
700 kfree_skb(hdev->sent_cmd);
701 hdev->sent_cmd = NULL;
702 }
703
704 /* After this point our queues are empty
705 * and no tasks are scheduled. */
706 hdev->close(hdev);
707
09fd0de5 708 hci_dev_lock(hdev);
744cf19e 709 mgmt_powered(hdev, 0);
09fd0de5 710 hci_dev_unlock(hdev);
5add6af8 711
1da177e4
LT
712 /* Clear flags */
713 hdev->flags = 0;
714
715 hci_req_unlock(hdev);
716
717 hci_dev_put(hdev);
718 return 0;
719}
720
721int hci_dev_close(__u16 dev)
722{
723 struct hci_dev *hdev;
724 int err;
725
70f23020
AE
726 hdev = hci_dev_get(dev);
727 if (!hdev)
1da177e4
LT
728 return -ENODEV;
729 err = hci_dev_do_close(hdev);
730 hci_dev_put(hdev);
731 return err;
732}
733
734int hci_dev_reset(__u16 dev)
735{
736 struct hci_dev *hdev;
737 int ret = 0;
738
70f23020
AE
739 hdev = hci_dev_get(dev);
740 if (!hdev)
1da177e4
LT
741 return -ENODEV;
742
743 hci_req_lock(hdev);
1da177e4
LT
744
745 if (!test_bit(HCI_UP, &hdev->flags))
746 goto done;
747
748 /* Drop queues */
749 skb_queue_purge(&hdev->rx_q);
750 skb_queue_purge(&hdev->cmd_q);
751
09fd0de5 752 hci_dev_lock(hdev);
1da177e4
LT
753 inquiry_cache_flush(hdev);
754 hci_conn_hash_flush(hdev);
09fd0de5 755 hci_dev_unlock(hdev);
1da177e4
LT
756
757 if (hdev->flush)
758 hdev->flush(hdev);
759
8e87d142 760 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 761 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
762
763 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
764 ret = __hci_request(hdev, hci_reset_req, 0,
765 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
766
767done:
1da177e4
LT
768 hci_req_unlock(hdev);
769 hci_dev_put(hdev);
770 return ret;
771}
772
773int hci_dev_reset_stat(__u16 dev)
774{
775 struct hci_dev *hdev;
776 int ret = 0;
777
70f23020
AE
778 hdev = hci_dev_get(dev);
779 if (!hdev)
1da177e4
LT
780 return -ENODEV;
781
782 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
783
784 hci_dev_put(hdev);
785
786 return ret;
787}
788
789int hci_dev_cmd(unsigned int cmd, void __user *arg)
790{
791 struct hci_dev *hdev;
792 struct hci_dev_req dr;
793 int err = 0;
794
795 if (copy_from_user(&dr, arg, sizeof(dr)))
796 return -EFAULT;
797
70f23020
AE
798 hdev = hci_dev_get(dr.dev_id);
799 if (!hdev)
1da177e4
LT
800 return -ENODEV;
801
802 switch (cmd) {
803 case HCISETAUTH:
04837f64
MH
804 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
805 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
806 break;
807
808 case HCISETENCRYPT:
809 if (!lmp_encrypt_capable(hdev)) {
810 err = -EOPNOTSUPP;
811 break;
812 }
813
814 if (!test_bit(HCI_AUTH, &hdev->flags)) {
815 /* Auth must be enabled first */
04837f64
MH
816 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
817 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
818 if (err)
819 break;
820 }
821
04837f64
MH
822 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
823 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
824 break;
825
826 case HCISETSCAN:
04837f64
MH
827 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
828 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
829 break;
830
1da177e4 831 case HCISETLINKPOL:
e4e8e37c
MH
832 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
833 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
834 break;
835
836 case HCISETLINKMODE:
e4e8e37c
MH
837 hdev->link_mode = ((__u16) dr.dev_opt) &
838 (HCI_LM_MASTER | HCI_LM_ACCEPT);
839 break;
840
841 case HCISETPTYPE:
842 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
843 break;
844
845 case HCISETACLMTU:
e4e8e37c
MH
846 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
847 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
848 break;
849
850 case HCISETSCOMTU:
e4e8e37c
MH
851 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
852 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
853 break;
854
855 default:
856 err = -EINVAL;
857 break;
858 }
e4e8e37c 859
1da177e4
LT
860 hci_dev_put(hdev);
861 return err;
862}
863
864int hci_get_dev_list(void __user *arg)
865{
8035ded4 866 struct hci_dev *hdev;
1da177e4
LT
867 struct hci_dev_list_req *dl;
868 struct hci_dev_req *dr;
1da177e4
LT
869 int n = 0, size, err;
870 __u16 dev_num;
871
872 if (get_user(dev_num, (__u16 __user *) arg))
873 return -EFAULT;
874
875 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
876 return -EINVAL;
877
878 size = sizeof(*dl) + dev_num * sizeof(*dr);
879
70f23020
AE
880 dl = kzalloc(size, GFP_KERNEL);
881 if (!dl)
1da177e4
LT
882 return -ENOMEM;
883
884 dr = dl->dev_req;
885
f20d09d5 886 read_lock(&hci_dev_list_lock);
8035ded4 887 list_for_each_entry(hdev, &hci_dev_list, list) {
3243553f 888 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
e0f9309f 889 cancel_delayed_work(&hdev->power_off);
c542a06c
JH
890
891 if (!test_bit(HCI_MGMT, &hdev->flags))
892 set_bit(HCI_PAIRABLE, &hdev->flags);
893
1da177e4
LT
894 (dr + n)->dev_id = hdev->id;
895 (dr + n)->dev_opt = hdev->flags;
c542a06c 896
1da177e4
LT
897 if (++n >= dev_num)
898 break;
899 }
f20d09d5 900 read_unlock(&hci_dev_list_lock);
1da177e4
LT
901
902 dl->dev_num = n;
903 size = sizeof(*dl) + n * sizeof(*dr);
904
905 err = copy_to_user(arg, dl, size);
906 kfree(dl);
907
908 return err ? -EFAULT : 0;
909}
910
911int hci_get_dev_info(void __user *arg)
912{
913 struct hci_dev *hdev;
914 struct hci_dev_info di;
915 int err = 0;
916
917 if (copy_from_user(&di, arg, sizeof(di)))
918 return -EFAULT;
919
70f23020
AE
920 hdev = hci_dev_get(di.dev_id);
921 if (!hdev)
1da177e4
LT
922 return -ENODEV;
923
3243553f
JH
924 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
925 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 926
c542a06c
JH
927 if (!test_bit(HCI_MGMT, &hdev->flags))
928 set_bit(HCI_PAIRABLE, &hdev->flags);
929
1da177e4
LT
930 strcpy(di.name, hdev->name);
931 di.bdaddr = hdev->bdaddr;
943da25d 932 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
933 di.flags = hdev->flags;
934 di.pkt_type = hdev->pkt_type;
935 di.acl_mtu = hdev->acl_mtu;
936 di.acl_pkts = hdev->acl_pkts;
937 di.sco_mtu = hdev->sco_mtu;
938 di.sco_pkts = hdev->sco_pkts;
939 di.link_policy = hdev->link_policy;
940 di.link_mode = hdev->link_mode;
941
942 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
943 memcpy(&di.features, &hdev->features, sizeof(di.features));
944
945 if (copy_to_user(arg, &di, sizeof(di)))
946 err = -EFAULT;
947
948 hci_dev_put(hdev);
949
950 return err;
951}
952
953/* ---- Interface to HCI drivers ---- */
954
611b30f7
MH
955static int hci_rfkill_set_block(void *data, bool blocked)
956{
957 struct hci_dev *hdev = data;
958
959 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
960
961 if (!blocked)
962 return 0;
963
964 hci_dev_do_close(hdev);
965
966 return 0;
967}
968
969static const struct rfkill_ops hci_rfkill_ops = {
970 .set_block = hci_rfkill_set_block,
971};
972
1da177e4
LT
973/* Alloc HCI device */
974struct hci_dev *hci_alloc_dev(void)
975{
976 struct hci_dev *hdev;
977
25ea6db0 978 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
979 if (!hdev)
980 return NULL;
981
0ac7e700 982 hci_init_sysfs(hdev);
1da177e4
LT
983 skb_queue_head_init(&hdev->driver_init);
984
985 return hdev;
986}
987EXPORT_SYMBOL(hci_alloc_dev);
988
989/* Free HCI device */
990void hci_free_dev(struct hci_dev *hdev)
991{
992 skb_queue_purge(&hdev->driver_init);
993
a91f2e39
MH
994 /* will free via device release */
995 put_device(&hdev->dev);
1da177e4
LT
996}
997EXPORT_SYMBOL(hci_free_dev);
998
ab81cbf9
JH
999static void hci_power_on(struct work_struct *work)
1000{
1001 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1002
1003 BT_DBG("%s", hdev->name);
1004
1005 if (hci_dev_open(hdev->id) < 0)
1006 return;
1007
1008 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
80b7ab33 1009 schedule_delayed_work(&hdev->power_off,
3243553f 1010 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9
JH
1011
1012 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
744cf19e 1013 mgmt_index_added(hdev);
ab81cbf9
JH
1014}
1015
1016static void hci_power_off(struct work_struct *work)
1017{
3243553f
JH
1018 struct hci_dev *hdev = container_of(work, struct hci_dev,
1019 power_off.work);
ab81cbf9
JH
1020
1021 BT_DBG("%s", hdev->name);
1022
1023 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1024
3243553f 1025 hci_dev_close(hdev->id);
ab81cbf9
JH
1026}
1027
16ab91ab
JH
1028static void hci_discov_off(struct work_struct *work)
1029{
1030 struct hci_dev *hdev;
1031 u8 scan = SCAN_PAGE;
1032
1033 hdev = container_of(work, struct hci_dev, discov_off.work);
1034
1035 BT_DBG("%s", hdev->name);
1036
09fd0de5 1037 hci_dev_lock(hdev);
16ab91ab
JH
1038
1039 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1040
1041 hdev->discov_timeout = 0;
1042
09fd0de5 1043 hci_dev_unlock(hdev);
16ab91ab
JH
1044}
1045
2aeb9a1a
JH
1046int hci_uuids_clear(struct hci_dev *hdev)
1047{
1048 struct list_head *p, *n;
1049
1050 list_for_each_safe(p, n, &hdev->uuids) {
1051 struct bt_uuid *uuid;
1052
1053 uuid = list_entry(p, struct bt_uuid, list);
1054
1055 list_del(p);
1056 kfree(uuid);
1057 }
1058
1059 return 0;
1060}
1061
55ed8ca1
JH
1062int hci_link_keys_clear(struct hci_dev *hdev)
1063{
1064 struct list_head *p, *n;
1065
1066 list_for_each_safe(p, n, &hdev->link_keys) {
1067 struct link_key *key;
1068
1069 key = list_entry(p, struct link_key, list);
1070
1071 list_del(p);
1072 kfree(key);
1073 }
1074
1075 return 0;
1076}
1077
1078struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1079{
8035ded4 1080 struct link_key *k;
55ed8ca1 1081
8035ded4 1082 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1083 if (bacmp(bdaddr, &k->bdaddr) == 0)
1084 return k;
55ed8ca1
JH
1085
1086 return NULL;
1087}
1088
d25e28ab
JH
1089static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1090 u8 key_type, u8 old_key_type)
1091{
1092 /* Legacy key */
1093 if (key_type < 0x03)
1094 return 1;
1095
1096 /* Debug keys are insecure so don't store them persistently */
1097 if (key_type == HCI_LK_DEBUG_COMBINATION)
1098 return 0;
1099
1100 /* Changed combination key and there's no previous one */
1101 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1102 return 0;
1103
1104 /* Security mode 3 case */
1105 if (!conn)
1106 return 1;
1107
1108 /* Neither local nor remote side had no-bonding as requirement */
1109 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1110 return 1;
1111
1112 /* Local side had dedicated bonding as requirement */
1113 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1114 return 1;
1115
1116 /* Remote side had dedicated bonding as requirement */
1117 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1118 return 1;
1119
1120 /* If none of the above criteria match, then don't store the key
1121 * persistently */
1122 return 0;
1123}
1124
75d262c2
VCG
1125struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1126{
1127 struct link_key *k;
1128
1129 list_for_each_entry(k, &hdev->link_keys, list) {
1130 struct key_master_id *id;
1131
1132 if (k->type != HCI_LK_SMP_LTK)
1133 continue;
1134
1135 if (k->dlen != sizeof(*id))
1136 continue;
1137
1138 id = (void *) &k->data;
1139 if (id->ediv == ediv &&
1140 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1141 return k;
1142 }
1143
1144 return NULL;
1145}
1146EXPORT_SYMBOL(hci_find_ltk);
1147
1148struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1149 bdaddr_t *bdaddr, u8 type)
1150{
1151 struct link_key *k;
1152
1153 list_for_each_entry(k, &hdev->link_keys, list)
1154 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1155 return k;
1156
1157 return NULL;
1158}
1159EXPORT_SYMBOL(hci_find_link_key_type);
1160
d25e28ab
JH
1161int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1162 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1163{
1164 struct link_key *key, *old_key;
4df378a1 1165 u8 old_key_type, persistent;
55ed8ca1
JH
1166
1167 old_key = hci_find_link_key(hdev, bdaddr);
1168 if (old_key) {
1169 old_key_type = old_key->type;
1170 key = old_key;
1171 } else {
12adcf3a 1172 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1173 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1174 if (!key)
1175 return -ENOMEM;
1176 list_add(&key->list, &hdev->link_keys);
1177 }
1178
1179 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1180
d25e28ab
JH
1181 /* Some buggy controller combinations generate a changed
1182 * combination key for legacy pairing even when there's no
1183 * previous key */
1184 if (type == HCI_LK_CHANGED_COMBINATION &&
1185 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1186 old_key_type == 0xff) {
d25e28ab 1187 type = HCI_LK_COMBINATION;
655fe6ec
JH
1188 if (conn)
1189 conn->key_type = type;
1190 }
d25e28ab 1191
55ed8ca1
JH
1192 bacpy(&key->bdaddr, bdaddr);
1193 memcpy(key->val, val, 16);
55ed8ca1
JH
1194 key->pin_len = pin_len;
1195
b6020ba0 1196 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1197 key->type = old_key_type;
4748fed2
JH
1198 else
1199 key->type = type;
1200
4df378a1
JH
1201 if (!new_key)
1202 return 0;
1203
1204 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1205
744cf19e 1206 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1207
1208 if (!persistent) {
1209 list_del(&key->list);
1210 kfree(key);
1211 }
55ed8ca1
JH
1212
1213 return 0;
1214}
1215
75d262c2 1216int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1217 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1218{
1219 struct link_key *key, *old_key;
1220 struct key_master_id *id;
1221 u8 old_key_type;
1222
1223 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1224
1225 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1226 if (old_key) {
1227 key = old_key;
1228 old_key_type = old_key->type;
1229 } else {
1230 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1231 if (!key)
1232 return -ENOMEM;
1233 list_add(&key->list, &hdev->link_keys);
1234 old_key_type = 0xff;
1235 }
1236
1237 key->dlen = sizeof(*id);
1238
1239 bacpy(&key->bdaddr, bdaddr);
1240 memcpy(key->val, ltk, sizeof(key->val));
1241 key->type = HCI_LK_SMP_LTK;
726b4ffc 1242 key->pin_len = key_size;
75d262c2
VCG
1243
1244 id = (void *) &key->data;
1245 id->ediv = ediv;
1246 memcpy(id->rand, rand, sizeof(id->rand));
1247
1248 if (new_key)
744cf19e 1249 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1250
1251 return 0;
1252}
1253
55ed8ca1
JH
1254int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1255{
1256 struct link_key *key;
1257
1258 key = hci_find_link_key(hdev, bdaddr);
1259 if (!key)
1260 return -ENOENT;
1261
1262 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1263
1264 list_del(&key->list);
1265 kfree(key);
1266
1267 return 0;
1268}
1269
6bd32326
VT
1270/* HCI command timer function */
1271static void hci_cmd_timer(unsigned long arg)
1272{
1273 struct hci_dev *hdev = (void *) arg;
1274
1275 BT_ERR("%s command tx timeout", hdev->name);
1276 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1277 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1278}
1279
2763eda6
SJ
1280struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1281 bdaddr_t *bdaddr)
1282{
1283 struct oob_data *data;
1284
1285 list_for_each_entry(data, &hdev->remote_oob_data, list)
1286 if (bacmp(bdaddr, &data->bdaddr) == 0)
1287 return data;
1288
1289 return NULL;
1290}
1291
1292int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1293{
1294 struct oob_data *data;
1295
1296 data = hci_find_remote_oob_data(hdev, bdaddr);
1297 if (!data)
1298 return -ENOENT;
1299
1300 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1301
1302 list_del(&data->list);
1303 kfree(data);
1304
1305 return 0;
1306}
1307
1308int hci_remote_oob_data_clear(struct hci_dev *hdev)
1309{
1310 struct oob_data *data, *n;
1311
1312 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1313 list_del(&data->list);
1314 kfree(data);
1315 }
1316
1317 return 0;
1318}
1319
1320int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1321 u8 *randomizer)
1322{
1323 struct oob_data *data;
1324
1325 data = hci_find_remote_oob_data(hdev, bdaddr);
1326
1327 if (!data) {
1328 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1329 if (!data)
1330 return -ENOMEM;
1331
1332 bacpy(&data->bdaddr, bdaddr);
1333 list_add(&data->list, &hdev->remote_oob_data);
1334 }
1335
1336 memcpy(data->hash, hash, sizeof(data->hash));
1337 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1338
1339 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1340
1341 return 0;
1342}
1343
b2a66aad
AJ
1344struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1345 bdaddr_t *bdaddr)
1346{
8035ded4 1347 struct bdaddr_list *b;
b2a66aad 1348
8035ded4 1349 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1350 if (bacmp(bdaddr, &b->bdaddr) == 0)
1351 return b;
b2a66aad
AJ
1352
1353 return NULL;
1354}
1355
1356int hci_blacklist_clear(struct hci_dev *hdev)
1357{
1358 struct list_head *p, *n;
1359
1360 list_for_each_safe(p, n, &hdev->blacklist) {
1361 struct bdaddr_list *b;
1362
1363 b = list_entry(p, struct bdaddr_list, list);
1364
1365 list_del(p);
1366 kfree(b);
1367 }
1368
1369 return 0;
1370}
1371
1372int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1373{
1374 struct bdaddr_list *entry;
b2a66aad
AJ
1375
1376 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1377 return -EBADF;
1378
5e762444
AJ
1379 if (hci_blacklist_lookup(hdev, bdaddr))
1380 return -EEXIST;
b2a66aad
AJ
1381
1382 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1383 if (!entry)
1384 return -ENOMEM;
b2a66aad
AJ
1385
1386 bacpy(&entry->bdaddr, bdaddr);
1387
1388 list_add(&entry->list, &hdev->blacklist);
1389
744cf19e 1390 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1391}
1392
1393int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct bdaddr_list *entry;
b2a66aad 1396
1ec918ce 1397 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1398 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1399
1400 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1401 if (!entry)
5e762444 1402 return -ENOENT;
b2a66aad
AJ
1403
1404 list_del(&entry->list);
1405 kfree(entry);
1406
744cf19e 1407 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1408}
1409
db323f2f 1410static void hci_clear_adv_cache(struct work_struct *work)
35815085 1411{
db323f2f
GP
1412 struct hci_dev *hdev = container_of(work, struct hci_dev,
1413 adv_work.work);
35815085
AG
1414
1415 hci_dev_lock(hdev);
1416
1417 hci_adv_entries_clear(hdev);
1418
1419 hci_dev_unlock(hdev);
1420}
1421
76c8686f
AG
1422int hci_adv_entries_clear(struct hci_dev *hdev)
1423{
1424 struct adv_entry *entry, *tmp;
1425
1426 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1427 list_del(&entry->list);
1428 kfree(entry);
1429 }
1430
1431 BT_DBG("%s adv cache cleared", hdev->name);
1432
1433 return 0;
1434}
1435
1436struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1437{
1438 struct adv_entry *entry;
1439
1440 list_for_each_entry(entry, &hdev->adv_entries, list)
1441 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1442 return entry;
1443
1444 return NULL;
1445}
1446
1447static inline int is_connectable_adv(u8 evt_type)
1448{
1449 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1450 return 1;
1451
1452 return 0;
1453}
1454
1455int hci_add_adv_entry(struct hci_dev *hdev,
1456 struct hci_ev_le_advertising_info *ev)
1457{
1458 struct adv_entry *entry;
1459
1460 if (!is_connectable_adv(ev->evt_type))
1461 return -EINVAL;
1462
1463 /* Only new entries should be added to adv_entries. So, if
1464 * bdaddr was found, don't add it. */
1465 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1466 return 0;
1467
1468 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1469 if (!entry)
1470 return -ENOMEM;
1471
1472 bacpy(&entry->bdaddr, &ev->bdaddr);
1473 entry->bdaddr_type = ev->bdaddr_type;
1474
1475 list_add(&entry->list, &hdev->adv_entries);
1476
1477 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1478 batostr(&entry->bdaddr), entry->bdaddr_type);
1479
1480 return 0;
1481}
1482
1da177e4
LT
1483/* Register HCI device */
1484int hci_register_dev(struct hci_dev *hdev)
1485{
1486 struct list_head *head = &hci_dev_list, *p;
08add513 1487 int i, id, error;
1da177e4 1488
c13854ce
MH
1489 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1490 hdev->bus, hdev->owner);
1da177e4
LT
1491
1492 if (!hdev->open || !hdev->close || !hdev->destruct)
1493 return -EINVAL;
1494
08add513
MM
1495 /* Do not allow HCI_AMP devices to register at index 0,
1496 * so the index can be used as the AMP controller ID.
1497 */
1498 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1499
f20d09d5 1500 write_lock(&hci_dev_list_lock);
1da177e4
LT
1501
1502 /* Find first available device id */
1503 list_for_each(p, &hci_dev_list) {
1504 if (list_entry(p, struct hci_dev, list)->id != id)
1505 break;
1506 head = p; id++;
1507 }
8e87d142 1508
1da177e4
LT
1509 sprintf(hdev->name, "hci%d", id);
1510 hdev->id = id;
c6feeb28 1511 list_add_tail(&hdev->list, head);
1da177e4
LT
1512
1513 atomic_set(&hdev->refcnt, 1);
09fd0de5 1514 mutex_init(&hdev->lock);
1da177e4
LT
1515
1516 hdev->flags = 0;
d23264a8 1517 hdev->dev_flags = 0;
1da177e4 1518 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1519 hdev->esco_type = (ESCO_HV1);
1da177e4 1520 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1521 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1522
04837f64
MH
1523 hdev->idle_timeout = 0;
1524 hdev->sniff_max_interval = 800;
1525 hdev->sniff_min_interval = 80;
1526
b78752cc 1527 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1528 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1529 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1530
1da177e4
LT
1531
1532 skb_queue_head_init(&hdev->rx_q);
1533 skb_queue_head_init(&hdev->cmd_q);
1534 skb_queue_head_init(&hdev->raw_q);
1535
6bd32326
VT
1536 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1537
cd4c5391 1538 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1539 hdev->reassembly[i] = NULL;
1540
1da177e4 1541 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1542 mutex_init(&hdev->req_lock);
1da177e4
LT
1543
1544 inquiry_cache_init(hdev);
1545
1546 hci_conn_hash_init(hdev);
1547
2e58ef3e
JH
1548 INIT_LIST_HEAD(&hdev->mgmt_pending);
1549
ea4bd8ba 1550 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1551
2aeb9a1a
JH
1552 INIT_LIST_HEAD(&hdev->uuids);
1553
55ed8ca1
JH
1554 INIT_LIST_HEAD(&hdev->link_keys);
1555
2763eda6
SJ
1556 INIT_LIST_HEAD(&hdev->remote_oob_data);
1557
76c8686f
AG
1558 INIT_LIST_HEAD(&hdev->adv_entries);
1559
db323f2f 1560 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1561 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1562 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1563
16ab91ab
JH
1564 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1565
1da177e4
LT
1566 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1567
1568 atomic_set(&hdev->promisc, 0);
1569
f20d09d5 1570 write_unlock(&hci_dev_list_lock);
1da177e4 1571
32845eb1
GP
1572 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1573 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1574 if (!hdev->workqueue) {
1575 error = -ENOMEM;
1576 goto err;
1577 }
f48fd9c8 1578
33ca954d
DH
1579 error = hci_add_sysfs(hdev);
1580 if (error < 0)
1581 goto err_wqueue;
1da177e4 1582
611b30f7
MH
1583 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1584 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1585 if (hdev->rfkill) {
1586 if (rfkill_register(hdev->rfkill) < 0) {
1587 rfkill_destroy(hdev->rfkill);
1588 hdev->rfkill = NULL;
1589 }
1590 }
1591
ab81cbf9
JH
1592 set_bit(HCI_AUTO_OFF, &hdev->flags);
1593 set_bit(HCI_SETUP, &hdev->flags);
7f971041 1594 schedule_work(&hdev->power_on);
ab81cbf9 1595
1da177e4
LT
1596 hci_notify(hdev, HCI_DEV_REG);
1597
1598 return id;
f48fd9c8 1599
33ca954d
DH
1600err_wqueue:
1601 destroy_workqueue(hdev->workqueue);
1602err:
f20d09d5 1603 write_lock(&hci_dev_list_lock);
f48fd9c8 1604 list_del(&hdev->list);
f20d09d5 1605 write_unlock(&hci_dev_list_lock);
f48fd9c8 1606
33ca954d 1607 return error;
1da177e4
LT
1608}
1609EXPORT_SYMBOL(hci_register_dev);
1610
1611/* Unregister HCI device */
59735631 1612void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1613{
ef222013
MH
1614 int i;
1615
c13854ce 1616 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1617
f20d09d5 1618 write_lock(&hci_dev_list_lock);
1da177e4 1619 list_del(&hdev->list);
f20d09d5 1620 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1621
1622 hci_dev_do_close(hdev);
1623
cd4c5391 1624 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1625 kfree_skb(hdev->reassembly[i]);
1626
ab81cbf9 1627 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86 1628 !test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 1629 hci_dev_lock(hdev);
744cf19e 1630 mgmt_index_removed(hdev);
09fd0de5 1631 hci_dev_unlock(hdev);
56e5cb86 1632 }
ab81cbf9 1633
2e58ef3e
JH
1634 /* mgmt_index_removed should take care of emptying the
1635 * pending list */
1636 BUG_ON(!list_empty(&hdev->mgmt_pending));
1637
1da177e4
LT
1638 hci_notify(hdev, HCI_DEV_UNREG);
1639
611b30f7
MH
1640 if (hdev->rfkill) {
1641 rfkill_unregister(hdev->rfkill);
1642 rfkill_destroy(hdev->rfkill);
1643 }
1644
ce242970 1645 hci_del_sysfs(hdev);
147e2d59 1646
db323f2f 1647 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1648
f48fd9c8
MH
1649 destroy_workqueue(hdev->workqueue);
1650
09fd0de5 1651 hci_dev_lock(hdev);
e2e0cacb 1652 hci_blacklist_clear(hdev);
2aeb9a1a 1653 hci_uuids_clear(hdev);
55ed8ca1 1654 hci_link_keys_clear(hdev);
2763eda6 1655 hci_remote_oob_data_clear(hdev);
76c8686f 1656 hci_adv_entries_clear(hdev);
09fd0de5 1657 hci_dev_unlock(hdev);
e2e0cacb 1658
1da177e4 1659 __hci_dev_put(hdev);
1da177e4
LT
1660}
1661EXPORT_SYMBOL(hci_unregister_dev);
1662
1663/* Suspend HCI device */
1664int hci_suspend_dev(struct hci_dev *hdev)
1665{
1666 hci_notify(hdev, HCI_DEV_SUSPEND);
1667 return 0;
1668}
1669EXPORT_SYMBOL(hci_suspend_dev);
1670
1671/* Resume HCI device */
1672int hci_resume_dev(struct hci_dev *hdev)
1673{
1674 hci_notify(hdev, HCI_DEV_RESUME);
1675 return 0;
1676}
1677EXPORT_SYMBOL(hci_resume_dev);
1678
76bca880
MH
1679/* Receive frame from HCI drivers */
1680int hci_recv_frame(struct sk_buff *skb)
1681{
1682 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1683 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1684 && !test_bit(HCI_INIT, &hdev->flags))) {
1685 kfree_skb(skb);
1686 return -ENXIO;
1687 }
1688
1689 /* Incomming skb */
1690 bt_cb(skb)->incoming = 1;
1691
1692 /* Time stamp */
1693 __net_timestamp(skb);
1694
76bca880 1695 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1696 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1697
76bca880
MH
1698 return 0;
1699}
1700EXPORT_SYMBOL(hci_recv_frame);
1701
33e882a5 1702static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1703 int count, __u8 index)
33e882a5
SS
1704{
1705 int len = 0;
1706 int hlen = 0;
1707 int remain = count;
1708 struct sk_buff *skb;
1709 struct bt_skb_cb *scb;
1710
1711 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1712 index >= NUM_REASSEMBLY)
1713 return -EILSEQ;
1714
1715 skb = hdev->reassembly[index];
1716
1717 if (!skb) {
1718 switch (type) {
1719 case HCI_ACLDATA_PKT:
1720 len = HCI_MAX_FRAME_SIZE;
1721 hlen = HCI_ACL_HDR_SIZE;
1722 break;
1723 case HCI_EVENT_PKT:
1724 len = HCI_MAX_EVENT_SIZE;
1725 hlen = HCI_EVENT_HDR_SIZE;
1726 break;
1727 case HCI_SCODATA_PKT:
1728 len = HCI_MAX_SCO_SIZE;
1729 hlen = HCI_SCO_HDR_SIZE;
1730 break;
1731 }
1732
1e429f38 1733 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1734 if (!skb)
1735 return -ENOMEM;
1736
1737 scb = (void *) skb->cb;
1738 scb->expect = hlen;
1739 scb->pkt_type = type;
1740
1741 skb->dev = (void *) hdev;
1742 hdev->reassembly[index] = skb;
1743 }
1744
1745 while (count) {
1746 scb = (void *) skb->cb;
1747 len = min(scb->expect, (__u16)count);
1748
1749 memcpy(skb_put(skb, len), data, len);
1750
1751 count -= len;
1752 data += len;
1753 scb->expect -= len;
1754 remain = count;
1755
1756 switch (type) {
1757 case HCI_EVENT_PKT:
1758 if (skb->len == HCI_EVENT_HDR_SIZE) {
1759 struct hci_event_hdr *h = hci_event_hdr(skb);
1760 scb->expect = h->plen;
1761
1762 if (skb_tailroom(skb) < scb->expect) {
1763 kfree_skb(skb);
1764 hdev->reassembly[index] = NULL;
1765 return -ENOMEM;
1766 }
1767 }
1768 break;
1769
1770 case HCI_ACLDATA_PKT:
1771 if (skb->len == HCI_ACL_HDR_SIZE) {
1772 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1773 scb->expect = __le16_to_cpu(h->dlen);
1774
1775 if (skb_tailroom(skb) < scb->expect) {
1776 kfree_skb(skb);
1777 hdev->reassembly[index] = NULL;
1778 return -ENOMEM;
1779 }
1780 }
1781 break;
1782
1783 case HCI_SCODATA_PKT:
1784 if (skb->len == HCI_SCO_HDR_SIZE) {
1785 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1786 scb->expect = h->dlen;
1787
1788 if (skb_tailroom(skb) < scb->expect) {
1789 kfree_skb(skb);
1790 hdev->reassembly[index] = NULL;
1791 return -ENOMEM;
1792 }
1793 }
1794 break;
1795 }
1796
1797 if (scb->expect == 0) {
1798 /* Complete frame */
1799
1800 bt_cb(skb)->pkt_type = type;
1801 hci_recv_frame(skb);
1802
1803 hdev->reassembly[index] = NULL;
1804 return remain;
1805 }
1806 }
1807
1808 return remain;
1809}
1810
ef222013
MH
1811int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1812{
f39a3c06
SS
1813 int rem = 0;
1814
ef222013
MH
1815 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1816 return -EILSEQ;
1817
da5f6c37 1818 while (count) {
1e429f38 1819 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1820 if (rem < 0)
1821 return rem;
ef222013 1822
f39a3c06
SS
1823 data += (count - rem);
1824 count = rem;
f81c6224 1825 }
ef222013 1826
f39a3c06 1827 return rem;
ef222013
MH
1828}
1829EXPORT_SYMBOL(hci_recv_fragment);
1830
99811510
SS
1831#define STREAM_REASSEMBLY 0
1832
1833int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1834{
1835 int type;
1836 int rem = 0;
1837
da5f6c37 1838 while (count) {
99811510
SS
1839 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1840
1841 if (!skb) {
1842 struct { char type; } *pkt;
1843
1844 /* Start of the frame */
1845 pkt = data;
1846 type = pkt->type;
1847
1848 data++;
1849 count--;
1850 } else
1851 type = bt_cb(skb)->pkt_type;
1852
1e429f38
GP
1853 rem = hci_reassembly(hdev, type, data, count,
1854 STREAM_REASSEMBLY);
99811510
SS
1855 if (rem < 0)
1856 return rem;
1857
1858 data += (count - rem);
1859 count = rem;
f81c6224 1860 }
99811510
SS
1861
1862 return rem;
1863}
1864EXPORT_SYMBOL(hci_recv_stream_fragment);
1865
1da177e4
LT
1866/* ---- Interface to upper protocols ---- */
1867
1da177e4
LT
1868int hci_register_cb(struct hci_cb *cb)
1869{
1870 BT_DBG("%p name %s", cb, cb->name);
1871
f20d09d5 1872 write_lock(&hci_cb_list_lock);
1da177e4 1873 list_add(&cb->list, &hci_cb_list);
f20d09d5 1874 write_unlock(&hci_cb_list_lock);
1da177e4
LT
1875
1876 return 0;
1877}
1878EXPORT_SYMBOL(hci_register_cb);
1879
1880int hci_unregister_cb(struct hci_cb *cb)
1881{
1882 BT_DBG("%p name %s", cb, cb->name);
1883
f20d09d5 1884 write_lock(&hci_cb_list_lock);
1da177e4 1885 list_del(&cb->list);
f20d09d5 1886 write_unlock(&hci_cb_list_lock);
1da177e4
LT
1887
1888 return 0;
1889}
1890EXPORT_SYMBOL(hci_unregister_cb);
1891
1892static int hci_send_frame(struct sk_buff *skb)
1893{
1894 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1895
1896 if (!hdev) {
1897 kfree_skb(skb);
1898 return -ENODEV;
1899 }
1900
0d48d939 1901 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1902
1903 if (atomic_read(&hdev->promisc)) {
1904 /* Time stamp */
a61bbcf2 1905 __net_timestamp(skb);
1da177e4 1906
eec8d2bc 1907 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1908 }
1909
1910 /* Get rid of skb owner, prior to sending to the driver. */
1911 skb_orphan(skb);
1912
1913 return hdev->send(skb);
1914}
1915
1916/* Send HCI command */
a9de9248 1917int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1918{
1919 int len = HCI_COMMAND_HDR_SIZE + plen;
1920 struct hci_command_hdr *hdr;
1921 struct sk_buff *skb;
1922
a9de9248 1923 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1924
1925 skb = bt_skb_alloc(len, GFP_ATOMIC);
1926 if (!skb) {
ef222013 1927 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1928 return -ENOMEM;
1929 }
1930
1931 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1932 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1933 hdr->plen = plen;
1934
1935 if (plen)
1936 memcpy(skb_put(skb, plen), param, plen);
1937
1938 BT_DBG("skb len %d", skb->len);
1939
0d48d939 1940 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1941 skb->dev = (void *) hdev;
c78ae283 1942
a5040efa
JH
1943 if (test_bit(HCI_INIT, &hdev->flags))
1944 hdev->init_last_cmd = opcode;
1945
1da177e4 1946 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1947 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1948
1949 return 0;
1950}
1da177e4
LT
1951
1952/* Get data from the previously sent command */
a9de9248 1953void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1954{
1955 struct hci_command_hdr *hdr;
1956
1957 if (!hdev->sent_cmd)
1958 return NULL;
1959
1960 hdr = (void *) hdev->sent_cmd->data;
1961
a9de9248 1962 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1963 return NULL;
1964
a9de9248 1965 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1966
1967 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1968}
1969
1970/* Send ACL data */
1971static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1972{
1973 struct hci_acl_hdr *hdr;
1974 int len = skb->len;
1975
badff6d0
ACM
1976 skb_push(skb, HCI_ACL_HDR_SIZE);
1977 skb_reset_transport_header(skb);
9c70220b 1978 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1979 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1980 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1981}
1982
73d80deb
LAD
1983static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1984 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1985{
1986 struct hci_dev *hdev = conn->hdev;
1987 struct sk_buff *list;
1988
70f23020
AE
1989 list = skb_shinfo(skb)->frag_list;
1990 if (!list) {
1da177e4
LT
1991 /* Non fragmented */
1992 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1993
73d80deb 1994 skb_queue_tail(queue, skb);
1da177e4
LT
1995 } else {
1996 /* Fragmented */
1997 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1998
1999 skb_shinfo(skb)->frag_list = NULL;
2000
2001 /* Queue all fragments atomically */
af3e6359 2002 spin_lock(&queue->lock);
1da177e4 2003
73d80deb 2004 __skb_queue_tail(queue, skb);
e702112f
AE
2005
2006 flags &= ~ACL_START;
2007 flags |= ACL_CONT;
1da177e4
LT
2008 do {
2009 skb = list; list = list->next;
8e87d142 2010
1da177e4 2011 skb->dev = (void *) hdev;
0d48d939 2012 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2013 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2014
2015 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2016
73d80deb 2017 __skb_queue_tail(queue, skb);
1da177e4
LT
2018 } while (list);
2019
af3e6359 2020 spin_unlock(&queue->lock);
1da177e4 2021 }
73d80deb
LAD
2022}
2023
2024void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2025{
2026 struct hci_conn *conn = chan->conn;
2027 struct hci_dev *hdev = conn->hdev;
2028
2029 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2030
2031 skb->dev = (void *) hdev;
2032 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2033 hci_add_acl_hdr(skb, conn->handle, flags);
2034
2035 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2036
3eff45ea 2037 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2038}
2039EXPORT_SYMBOL(hci_send_acl);
2040
2041/* Send SCO data */
0d861d8b 2042void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2043{
2044 struct hci_dev *hdev = conn->hdev;
2045 struct hci_sco_hdr hdr;
2046
2047 BT_DBG("%s len %d", hdev->name, skb->len);
2048
aca3192c 2049 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2050 hdr.dlen = skb->len;
2051
badff6d0
ACM
2052 skb_push(skb, HCI_SCO_HDR_SIZE);
2053 skb_reset_transport_header(skb);
9c70220b 2054 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2055
2056 skb->dev = (void *) hdev;
0d48d939 2057 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2058
1da177e4 2059 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2060 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2061}
2062EXPORT_SYMBOL(hci_send_sco);
2063
2064/* ---- HCI TX task (outgoing data) ---- */
2065
2066/* HCI Connection scheduler */
2067static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2068{
2069 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2070 struct hci_conn *conn = NULL, *c;
1da177e4 2071 int num = 0, min = ~0;
1da177e4 2072
8e87d142 2073 /* We don't have to lock device here. Connections are always
1da177e4 2074 * added and removed with TX task disabled. */
bf4c6325
GP
2075
2076 rcu_read_lock();
2077
2078 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2079 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2080 continue;
769be974
MH
2081
2082 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2083 continue;
2084
1da177e4
LT
2085 num++;
2086
2087 if (c->sent < min) {
2088 min = c->sent;
2089 conn = c;
2090 }
52087a79
LAD
2091
2092 if (hci_conn_num(hdev, type) == num)
2093 break;
1da177e4
LT
2094 }
2095
bf4c6325
GP
2096 rcu_read_unlock();
2097
1da177e4 2098 if (conn) {
6ed58ec5
VT
2099 int cnt, q;
2100
2101 switch (conn->type) {
2102 case ACL_LINK:
2103 cnt = hdev->acl_cnt;
2104 break;
2105 case SCO_LINK:
2106 case ESCO_LINK:
2107 cnt = hdev->sco_cnt;
2108 break;
2109 case LE_LINK:
2110 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2111 break;
2112 default:
2113 cnt = 0;
2114 BT_ERR("Unknown link type");
2115 }
2116
2117 q = cnt / num;
1da177e4
LT
2118 *quote = q ? q : 1;
2119 } else
2120 *quote = 0;
2121
2122 BT_DBG("conn %p quote %d", conn, *quote);
2123 return conn;
2124}
2125
bae1f5d9 2126static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2127{
2128 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2129 struct hci_conn *c;
1da177e4 2130
bae1f5d9 2131 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2132
bf4c6325
GP
2133 rcu_read_lock();
2134
1da177e4 2135 /* Kill stalled connections */
bf4c6325 2136 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2137 if (c->type == type && c->sent) {
2138 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2139 hdev->name, batostr(&c->dst));
2140 hci_acl_disconn(c, 0x13);
2141 }
2142 }
bf4c6325
GP
2143
2144 rcu_read_unlock();
1da177e4
LT
2145}
2146
73d80deb
LAD
2147static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2148 int *quote)
1da177e4 2149{
73d80deb
LAD
2150 struct hci_conn_hash *h = &hdev->conn_hash;
2151 struct hci_chan *chan = NULL;
2152 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2153 struct hci_conn *conn;
73d80deb
LAD
2154 int cnt, q, conn_num = 0;
2155
2156 BT_DBG("%s", hdev->name);
2157
bf4c6325
GP
2158 rcu_read_lock();
2159
2160 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2161 struct hci_chan *tmp;
2162
2163 if (conn->type != type)
2164 continue;
2165
2166 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2167 continue;
2168
2169 conn_num++;
2170
8192edef 2171 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2172 struct sk_buff *skb;
2173
2174 if (skb_queue_empty(&tmp->data_q))
2175 continue;
2176
2177 skb = skb_peek(&tmp->data_q);
2178 if (skb->priority < cur_prio)
2179 continue;
2180
2181 if (skb->priority > cur_prio) {
2182 num = 0;
2183 min = ~0;
2184 cur_prio = skb->priority;
2185 }
2186
2187 num++;
2188
2189 if (conn->sent < min) {
2190 min = conn->sent;
2191 chan = tmp;
2192 }
2193 }
2194
2195 if (hci_conn_num(hdev, type) == conn_num)
2196 break;
2197 }
2198
bf4c6325
GP
2199 rcu_read_unlock();
2200
73d80deb
LAD
2201 if (!chan)
2202 return NULL;
2203
2204 switch (chan->conn->type) {
2205 case ACL_LINK:
2206 cnt = hdev->acl_cnt;
2207 break;
2208 case SCO_LINK:
2209 case ESCO_LINK:
2210 cnt = hdev->sco_cnt;
2211 break;
2212 case LE_LINK:
2213 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2214 break;
2215 default:
2216 cnt = 0;
2217 BT_ERR("Unknown link type");
2218 }
2219
2220 q = cnt / num;
2221 *quote = q ? q : 1;
2222 BT_DBG("chan %p quote %d", chan, *quote);
2223 return chan;
2224}
2225
02b20f0b
LAD
2226static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2227{
2228 struct hci_conn_hash *h = &hdev->conn_hash;
2229 struct hci_conn *conn;
2230 int num = 0;
2231
2232 BT_DBG("%s", hdev->name);
2233
bf4c6325
GP
2234 rcu_read_lock();
2235
2236 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2237 struct hci_chan *chan;
2238
2239 if (conn->type != type)
2240 continue;
2241
2242 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2243 continue;
2244
2245 num++;
2246
8192edef 2247 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2248 struct sk_buff *skb;
2249
2250 if (chan->sent) {
2251 chan->sent = 0;
2252 continue;
2253 }
2254
2255 if (skb_queue_empty(&chan->data_q))
2256 continue;
2257
2258 skb = skb_peek(&chan->data_q);
2259 if (skb->priority >= HCI_PRIO_MAX - 1)
2260 continue;
2261
2262 skb->priority = HCI_PRIO_MAX - 1;
2263
2264 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2265 skb->priority);
2266 }
2267
2268 if (hci_conn_num(hdev, type) == num)
2269 break;
2270 }
bf4c6325
GP
2271
2272 rcu_read_unlock();
2273
02b20f0b
LAD
2274}
2275
73d80deb
LAD
2276static inline void hci_sched_acl(struct hci_dev *hdev)
2277{
2278 struct hci_chan *chan;
1da177e4
LT
2279 struct sk_buff *skb;
2280 int quote;
73d80deb 2281 unsigned int cnt;
1da177e4
LT
2282
2283 BT_DBG("%s", hdev->name);
2284
52087a79
LAD
2285 if (!hci_conn_num(hdev, ACL_LINK))
2286 return;
2287
1da177e4
LT
2288 if (!test_bit(HCI_RAW, &hdev->flags)) {
2289 /* ACL tx timeout must be longer than maximum
2290 * link supervision timeout (40.9 seconds) */
82453021 2291 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2292 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2293 }
2294
73d80deb 2295 cnt = hdev->acl_cnt;
04837f64 2296
73d80deb
LAD
2297 while (hdev->acl_cnt &&
2298 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2299 u32 priority = (skb_peek(&chan->data_q))->priority;
2300 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2301 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2302 skb->len, skb->priority);
2303
ec1cce24
LAD
2304 /* Stop if priority has changed */
2305 if (skb->priority < priority)
2306 break;
2307
2308 skb = skb_dequeue(&chan->data_q);
2309
73d80deb
LAD
2310 hci_conn_enter_active_mode(chan->conn,
2311 bt_cb(skb)->force_active);
04837f64 2312
1da177e4
LT
2313 hci_send_frame(skb);
2314 hdev->acl_last_tx = jiffies;
2315
2316 hdev->acl_cnt--;
73d80deb
LAD
2317 chan->sent++;
2318 chan->conn->sent++;
1da177e4
LT
2319 }
2320 }
02b20f0b
LAD
2321
2322 if (cnt != hdev->acl_cnt)
2323 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2324}
2325
2326/* Schedule SCO */
2327static inline void hci_sched_sco(struct hci_dev *hdev)
2328{
2329 struct hci_conn *conn;
2330 struct sk_buff *skb;
2331 int quote;
2332
2333 BT_DBG("%s", hdev->name);
2334
52087a79
LAD
2335 if (!hci_conn_num(hdev, SCO_LINK))
2336 return;
2337
1da177e4
LT
2338 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2339 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2340 BT_DBG("skb %p len %d", skb, skb->len);
2341 hci_send_frame(skb);
2342
2343 conn->sent++;
2344 if (conn->sent == ~0)
2345 conn->sent = 0;
2346 }
2347 }
2348}
2349
b6a0dc82
MH
2350static inline void hci_sched_esco(struct hci_dev *hdev)
2351{
2352 struct hci_conn *conn;
2353 struct sk_buff *skb;
2354 int quote;
2355
2356 BT_DBG("%s", hdev->name);
2357
52087a79
LAD
2358 if (!hci_conn_num(hdev, ESCO_LINK))
2359 return;
2360
b6a0dc82
MH
2361 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2362 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2363 BT_DBG("skb %p len %d", skb, skb->len);
2364 hci_send_frame(skb);
2365
2366 conn->sent++;
2367 if (conn->sent == ~0)
2368 conn->sent = 0;
2369 }
2370 }
2371}
2372
/* Schedule LE traffic.  When the controller reports no dedicated LE
 * buffers (le_pkts == 0), LE frames consume the shared ACL buffer pool
 * instead, so the quota is taken from (and written back to) acl_cnt.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE buffer count if available, the ACL pool otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Remember the priority at the head of the queue; frames
		 * are only sent while they are at least that urgent. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek succeeded above, so this dequeues that skb */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining quota back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, re-balance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2423
3eff45ea 2424static void hci_tx_work(struct work_struct *work)
1da177e4 2425{
3eff45ea 2426 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2427 struct sk_buff *skb;
2428
6ed58ec5
VT
2429 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2430 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2431
2432 /* Schedule queues and send stuff to HCI driver */
2433
2434 hci_sched_acl(hdev);
2435
2436 hci_sched_sco(hdev);
2437
b6a0dc82
MH
2438 hci_sched_esco(hdev);
2439
6ed58ec5
VT
2440 hci_sched_le(hdev);
2441
1da177e4
LT
2442 /* Send next queued raw (unknown type) packet */
2443 while ((skb = skb_dequeue(&hdev->raw_q)))
2444 hci_send_frame(skb);
1da177e4
LT
2445}
2446
25985edc 2447/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2448
2449/* ACL data packet */
2450static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2451{
2452 struct hci_acl_hdr *hdr = (void *) skb->data;
2453 struct hci_conn *conn;
2454 __u16 handle, flags;
2455
2456 skb_pull(skb, HCI_ACL_HDR_SIZE);
2457
2458 handle = __le16_to_cpu(hdr->handle);
2459 flags = hci_flags(handle);
2460 handle = hci_handle(handle);
2461
2462 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2463
2464 hdev->stat.acl_rx++;
2465
2466 hci_dev_lock(hdev);
2467 conn = hci_conn_hash_lookup_handle(hdev, handle);
2468 hci_dev_unlock(hdev);
8e87d142 2469
1da177e4 2470 if (conn) {
65983fc7 2471 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2472
1da177e4 2473 /* Send to upper protocol */
686ebf28
UF
2474 l2cap_recv_acldata(conn, skb, flags);
2475 return;
1da177e4 2476 } else {
8e87d142 2477 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2478 hdev->name, handle);
2479 }
2480
2481 kfree_skb(skb);
2482}
2483
2484/* SCO data packet */
2485static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2486{
2487 struct hci_sco_hdr *hdr = (void *) skb->data;
2488 struct hci_conn *conn;
2489 __u16 handle;
2490
2491 skb_pull(skb, HCI_SCO_HDR_SIZE);
2492
2493 handle = __le16_to_cpu(hdr->handle);
2494
2495 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2496
2497 hdev->stat.sco_rx++;
2498
2499 hci_dev_lock(hdev);
2500 conn = hci_conn_hash_lookup_handle(hdev, handle);
2501 hci_dev_unlock(hdev);
2502
2503 if (conn) {
1da177e4 2504 /* Send to upper protocol */
686ebf28
UF
2505 sco_recv_scodata(conn, skb);
2506 return;
1da177e4 2507 } else {
8e87d142 2508 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2509 hdev->name, handle);
2510 }
2511
2512 kfree_skb(skb);
2513}
2514
/* Deferred RX work: process every frame queued on hdev->rx_q. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack itself does not process frames;
		 * the socket copy above (if any) is all that happens. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * only events are handled during init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame: dispatch on packet type; the callee
		 * takes ownership of the skb in every case. */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
2566
/* Deferred command work: submit the next queued HCI command when the
 * controller has a command credit (cmd_cnt) available. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Free the clone kept for the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the command being sent; NOTE(review):
		 * presumably so completion handlers can inspect it —
		 * confirm against the event-handling code. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending;
			 * otherwise (re)arm the watchdog timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2597
2598int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2599{
2600 /* General inquiry access code (GIAC) */
2601 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2602 struct hci_cp_inquiry cp;
2603
2604 BT_DBG("%s", hdev->name);
2605
2606 if (test_bit(HCI_INQUIRY, &hdev->flags))
2607 return -EINPROGRESS;
2608
2609 memset(&cp, 0, sizeof(cp));
2610 memcpy(&cp.lap, lap, sizeof(cp.lap));
2611 cp.length = length;
2612
2613 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2614}
023d5049
AG
2615
2616int hci_cancel_inquiry(struct hci_dev *hdev)
2617{
2618 BT_DBG("%s", hdev->name);
2619
2620 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2621 return -EPERM;
2622
2623 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2624}
7784d78f
AE
2625
2626module_param(enable_hs, bool, 0644);
2627MODULE_PARM_DESC(enable_hs, "Enable High Speed");