Bluetooth: Rename hdev->inq_cache to hdev->discovery
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
7784d78f
AE
58int enable_hs;
59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
/* Register a notifier block on the HCI atomic notifier chain; it will
 * be invoked by hci_notify() for core events such as HCI_DEV_UP.
 * Returns 0 on success (see atomic_notifier_chain_register()).
 */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a previously registered notifier block from the HCI notifier
 * chain.  Counterpart of hci_register_notifier().
 */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast an HCI core event to every registered notifier, passing
 * the affected device as the notifier argument.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request: record the result and wake
 * the waiter sleeping in __hci_request().  Called from command-complete
 * handling with the opcode that just finished.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and then sleeps interruptibly
 * for up to @timeout jiffies until hci_req_complete()/hci_req_cancel()
 * changes req_status.  Caller must hold the request lock
 * (hci_req_lock).  Returns 0 on success, a negative errno mapped from
 * the controller status, -EINTR if interrupted by a signal, or
 * -ETIMEDOUT if nothing completed the request in time.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before issuing the request so the completion
	 * wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock and refuses to run unless the device is up.  Returns
 * -ENETDOWN when the interface is down, otherwise whatever
 * __hci_request() returns.
 */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: issue an HCI_Reset to the controller.  Sets the
 * HCI_RESET flag first so the event path knows a reset is in flight.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Send the initialization command sequence for a BR/EDR controller.
 * Runs from hci_init_req() with HCI_INIT set; the replies are consumed
 * by the event handlers, which feed the device's capabilities back into
 * hdev.  Command order follows the mandatory-then-optional split below.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all link keys stored on the controller
	 * (BDADDR_ANY + delete_all) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
e61ef499
AE
/* Initialization sequence for AMP (alternate MAC/PHY) controllers:
 * block-based flow control, reset, then local version query.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback used by hci_dev_open(): first flush any
 * driver-provided "special" init commands into the command queue, then
 * run the transport-specific init sequence for the device type.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
6ed58ec5
VT
/* Request callback: LE-specific initialization — query the LE buffer
 * size.  Issued only for LE-capable hosts (see hci_dev_open()).
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
1da177e4
LT
/* Request callback: set inquiry/page scan enable; opt carries the raw
 * scan-enable bitmask from the HCISETSCAN ioctl.
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: enable/disable authentication; opt carries the raw
 * value from the HCISETAUTH ioctl.
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: enable/disable link-level encryption; opt carries
 * the raw value from the HCISETENCRYPT ioctl.
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
e4e8e37c
MH
/* Request callback: set the default link policy; opt carries the raw
 * policy word from the HCISETLINKPOL ioctl.
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
30883512 360 struct discovery_state *cache = &hdev->discovery;
b57c1a56 361 struct inquiry_entry *p, *n;
1da177e4 362
561aafbc
JH
363 list_for_each_entry_safe(p, n, &cache->all, all) {
364 list_del(&p->all);
b57c1a56 365 kfree(p);
1da177e4 366 }
561aafbc
JH
367
368 INIT_LIST_HEAD(&cache->unknown);
369 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
370}
371
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{
30883512 374 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
561aafbc
JH
379 list_for_each_entry(e, &cache->all, all) {
380 if (!bacmp(&e->data.bdaddr, bdaddr))
381 return e;
382 }
383
384 return NULL;
385}
386
387struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
388 bdaddr_t *bdaddr)
389{
30883512 390 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
391 struct inquiry_entry *e;
392
393 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
394
395 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 396 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
397 return e;
398 }
399
400 return NULL;
1da177e4
LT
401}
402
/* Insert or refresh an inquiry-cache entry for @data->bdaddr and keep
 * the unknown-name sub-list consistent with @name_known.  Returns true
 * when the entry's name is (now) known, false when the name is still
 * unknown or allocation failed.  Caller holds the hdev lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie)
		goto update;

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and drop it from the
	 * unknown list (but leave NAME_PENDING entries alone). */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
445
446static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
447{
30883512 448 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
449 struct inquiry_info *info = (struct inquiry_info *) buf;
450 struct inquiry_entry *e;
451 int copied = 0;
452
561aafbc 453 list_for_each_entry(e, &cache->all, all) {
1da177e4 454 struct inquiry_data *data = &e->data;
b57c1a56
JH
455
456 if (copied >= num)
457 break;
458
1da177e4
LT
459 bacpy(&info->bdaddr, &data->bdaddr);
460 info->pscan_rep_mode = data->pscan_rep_mode;
461 info->pscan_period_mode = data->pscan_period_mode;
462 info->pscan_mode = data->pscan_mode;
463 memcpy(info->dev_class, data->dev_class, 3);
464 info->clock_offset = data->clock_offset;
b57c1a56 465
1da177e4 466 info++;
b57c1a56 467 copied++;
1da177e4
LT
468 }
469
470 BT_DBG("cache %p, copied %d", cache, copied);
471 return copied;
472}
473
/* Request callback: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed through @opt.  A no-op if an inquiry
 * is already in progress.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
490
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (if the
 * cache is stale/empty or the caller asked to flush it), then copy the
 * cached results back to userspace after the request structure itself.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit approximates it */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
556
557/* ---- HCI ioctl helpers ---- */
558
/* Bring an HCI device up: open the transport, run the HCI init
 * sequence (unless the device is raw), and on success mark the device
 * HCI_UP and notify listeners/mgmt.  On init failure the transport is
 * fully torn down again.  Returns 0 or a negative errno (-ENODEV,
 * -ERFKILL, -EALREADY, -EIO, or an init-request error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report power-on to mgmt while still in setup */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
645
/* Full device shutdown: cancel pending work, flush queues, reset the
 * controller, close the transport and report power-off to mgmt.  Safe
 * to call on an already-down device (returns 0 after stopping the
 * command timer).  The teardown order below matters — works are
 * flushed before queues are purged, and the command queue is purged
 * again after the final reset request.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
725
726int hci_dev_close(__u16 dev)
727{
728 struct hci_dev *hdev;
729 int err;
730
70f23020
AE
731 hdev = hci_dev_get(dev);
732 if (!hdev)
1da177e4
LT
733 return -ENODEV;
734 err = hci_dev_do_close(hdev);
735 hci_dev_put(hdev);
736 return err;
737}
738
/* HCIDEVRESET ioctl backend: drop queued traffic, flush the inquiry
 * cache and connection hash, reset packet counters, and (for non-raw
 * devices) issue an HCI_Reset.  A no-op returning 0 when the device is
 * not up.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
777
778int hci_dev_reset_stat(__u16 dev)
779{
780 struct hci_dev *hdev;
781 int ret = 0;
782
70f23020
AE
783 hdev = hci_dev_get(dev);
784 if (!hdev)
1da177e4
LT
785 return -ENODEV;
786
787 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
788
789 hci_dev_put(hdev);
790
791 return ret;
792}
793
/* Dispatcher for the per-device HCISET* ioctls.  Copies the request
 * from userspace, resolves the device, and either runs a synchronous
 * HCI request (auth/encrypt/scan/linkpol) or updates hdev fields
 * directly (link mode, packet type, MTUs).  Returns 0 or a negative
 * errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two __u16 values: low half = packet count,
	 * high half = MTU */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
868
/* HCIGETDEVLIST ioctl backend: return up to dev_num (id, flags) pairs
 * for registered devices.  Also cancels any pending auto-power-off and
 * marks non-mgmt devices pairable, mirroring hci_get_dev_info().
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kmalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
915
/* HCIGETDEVINFO ioctl backend: fill a hci_dev_info snapshot (address,
 * type, flags, MTUs, stats, features) for one device and copy it back
 * to userspace.  Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: keep it powered */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
957
958/* ---- Interface to HCI drivers ---- */
959
611b30f7
MH
960static int hci_rfkill_set_block(void *data, bool blocked)
961{
962 struct hci_dev *hdev = data;
963
964 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
965
966 if (!blocked)
967 return 0;
968
969 hci_dev_do_close(hdev);
970
971 return 0;
972}
973
974static const struct rfkill_ops hci_rfkill_ops = {
975 .set_block = hci_rfkill_set_block,
976};
977
1da177e4
LT
/* Alloc HCI device.
 * Returns a zeroed hci_dev with sysfs state and the driver_init queue
 * initialized, or NULL on allocation failure.  Freed with
 * hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
993
/* Free HCI device.
 * Drops the embedded struct device reference; the actual memory is
 * released by the device release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1003
ab81cbf9
JH
/* Work item: power on a device.  If the device was auto-powered
 * (HCI_AUTO_OFF) schedule the delayed power-off; once initial setup is
 * done announce the new index to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
1020
/* Delayed work item: auto power-off.  Clears HCI_AUTO_OFF so the close
 * path doesn't try to cancel this very work, then closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1032
16ab91ab
JH
/* Delayed work item: discoverable timeout expired — switch the
 * controller back to page-scan only and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1050
2aeb9a1a
JH
1051int hci_uuids_clear(struct hci_dev *hdev)
1052{
1053 struct list_head *p, *n;
1054
1055 list_for_each_safe(p, n, &hdev->uuids) {
1056 struct bt_uuid *uuid;
1057
1058 uuid = list_entry(p, struct bt_uuid, list);
1059
1060 list_del(p);
1061 kfree(uuid);
1062 }
1063
1064 return 0;
1065}
1066
55ed8ca1
JH
1067int hci_link_keys_clear(struct hci_dev *hdev)
1068{
1069 struct list_head *p, *n;
1070
1071 list_for_each_safe(p, n, &hdev->link_keys) {
1072 struct link_key *key;
1073
1074 key = list_entry(p, struct link_key, list);
1075
1076 list_del(p);
1077 kfree(key);
1078 }
1079
1080 return 0;
1081}
1082
/* Find a stored link key by remote address.  Returns the key or NULL. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
1093
d25e28ab
JH
1094static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1095 u8 key_type, u8 old_key_type)
1096{
1097 /* Legacy key */
1098 if (key_type < 0x03)
1099 return 1;
1100
1101 /* Debug keys are insecure so don't store them persistently */
1102 if (key_type == HCI_LK_DEBUG_COMBINATION)
1103 return 0;
1104
1105 /* Changed combination key and there's no previous one */
1106 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1107 return 0;
1108
1109 /* Security mode 3 case */
1110 if (!conn)
1111 return 1;
1112
1113 /* Neither local nor remote side had no-bonding as requirement */
1114 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1115 return 1;
1116
1117 /* Local side had dedicated bonding as requirement */
1118 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1119 return 1;
1120
1121 /* Remote side had dedicated bonding as requirement */
1122 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1123 return 1;
1124
1125 /* If none of the above criteria match, then don't store the key
1126 * persistently */
1127 return 0;
1128}
1129
75d262c2
VCG
/* Find a stored SMP long-term key by EDiv and Rand.  LTKs are kept on
 * the link_keys list with type HCI_LK_SMP_LTK and a key_master_id
 * payload in k->data.  Returns the key or NULL.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Payload must be exactly a key_master_id */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1152
/* Find a stored link key matching both remote address and key type.
 * Returns the key or NULL.
 */
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
1165
d25e28ab
JH
1166int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1167 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1168{
1169 struct link_key *key, *old_key;
4df378a1 1170 u8 old_key_type, persistent;
55ed8ca1
JH
1171
1172 old_key = hci_find_link_key(hdev, bdaddr);
1173 if (old_key) {
1174 old_key_type = old_key->type;
1175 key = old_key;
1176 } else {
12adcf3a 1177 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1178 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1179 if (!key)
1180 return -ENOMEM;
1181 list_add(&key->list, &hdev->link_keys);
1182 }
1183
1184 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1185
d25e28ab
JH
1186 /* Some buggy controller combinations generate a changed
1187 * combination key for legacy pairing even when there's no
1188 * previous key */
1189 if (type == HCI_LK_CHANGED_COMBINATION &&
1190 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1191 old_key_type == 0xff) {
d25e28ab 1192 type = HCI_LK_COMBINATION;
655fe6ec
JH
1193 if (conn)
1194 conn->key_type = type;
1195 }
d25e28ab 1196
55ed8ca1
JH
1197 bacpy(&key->bdaddr, bdaddr);
1198 memcpy(key->val, val, 16);
55ed8ca1
JH
1199 key->pin_len = pin_len;
1200
b6020ba0 1201 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1202 key->type = old_key_type;
4748fed2
JH
1203 else
1204 key->type = type;
1205
4df378a1
JH
1206 if (!new_key)
1207 return 0;
1208
1209 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1210
744cf19e 1211 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1212
1213 if (!persistent) {
1214 list_del(&key->list);
1215 kfree(key);
1216 }
55ed8ca1
JH
1217
1218 return 0;
1219}
1220
75d262c2 1221int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1222 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1223{
1224 struct link_key *key, *old_key;
1225 struct key_master_id *id;
1226 u8 old_key_type;
1227
1228 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1229
1230 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1231 if (old_key) {
1232 key = old_key;
1233 old_key_type = old_key->type;
1234 } else {
1235 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1236 if (!key)
1237 return -ENOMEM;
1238 list_add(&key->list, &hdev->link_keys);
1239 old_key_type = 0xff;
1240 }
1241
1242 key->dlen = sizeof(*id);
1243
1244 bacpy(&key->bdaddr, bdaddr);
1245 memcpy(key->val, ltk, sizeof(key->val));
1246 key->type = HCI_LK_SMP_LTK;
726b4ffc 1247 key->pin_len = key_size;
75d262c2
VCG
1248
1249 id = (void *) &key->data;
1250 id->ediv = ediv;
1251 memcpy(id->rand, rand, sizeof(id->rand));
1252
1253 if (new_key)
744cf19e 1254 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1255
1256 return 0;
1257}
1258
55ed8ca1
JH
1259int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1260{
1261 struct link_key *key;
1262
1263 key = hci_find_link_key(hdev, bdaddr);
1264 if (!key)
1265 return -ENOENT;
1266
1267 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1268
1269 list_del(&key->list);
1270 kfree(key);
1271
1272 return 0;
1273}
1274
6bd32326
VT
1275/* HCI command timer function */
1276static void hci_cmd_timer(unsigned long arg)
1277{
1278 struct hci_dev *hdev = (void *) arg;
1279
1280 BT_ERR("%s command tx timeout", hdev->name);
1281 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1282 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1283}
1284
2763eda6
SJ
1285struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1286 bdaddr_t *bdaddr)
1287{
1288 struct oob_data *data;
1289
1290 list_for_each_entry(data, &hdev->remote_oob_data, list)
1291 if (bacmp(bdaddr, &data->bdaddr) == 0)
1292 return data;
1293
1294 return NULL;
1295}
1296
1297int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298{
1299 struct oob_data *data;
1300
1301 data = hci_find_remote_oob_data(hdev, bdaddr);
1302 if (!data)
1303 return -ENOENT;
1304
1305 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1306
1307 list_del(&data->list);
1308 kfree(data);
1309
1310 return 0;
1311}
1312
1313int hci_remote_oob_data_clear(struct hci_dev *hdev)
1314{
1315 struct oob_data *data, *n;
1316
1317 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1318 list_del(&data->list);
1319 kfree(data);
1320 }
1321
1322 return 0;
1323}
1324
1325int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1326 u8 *randomizer)
1327{
1328 struct oob_data *data;
1329
1330 data = hci_find_remote_oob_data(hdev, bdaddr);
1331
1332 if (!data) {
1333 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1334 if (!data)
1335 return -ENOMEM;
1336
1337 bacpy(&data->bdaddr, bdaddr);
1338 list_add(&data->list, &hdev->remote_oob_data);
1339 }
1340
1341 memcpy(data->hash, hash, sizeof(data->hash));
1342 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1343
1344 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1345
1346 return 0;
1347}
1348
b2a66aad
AJ
1349struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1350 bdaddr_t *bdaddr)
1351{
8035ded4 1352 struct bdaddr_list *b;
b2a66aad 1353
8035ded4 1354 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1355 if (bacmp(bdaddr, &b->bdaddr) == 0)
1356 return b;
b2a66aad
AJ
1357
1358 return NULL;
1359}
1360
1361int hci_blacklist_clear(struct hci_dev *hdev)
1362{
1363 struct list_head *p, *n;
1364
1365 list_for_each_safe(p, n, &hdev->blacklist) {
1366 struct bdaddr_list *b;
1367
1368 b = list_entry(p, struct bdaddr_list, list);
1369
1370 list_del(p);
1371 kfree(b);
1372 }
1373
1374 return 0;
1375}
1376
1377int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1378{
1379 struct bdaddr_list *entry;
b2a66aad
AJ
1380
1381 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1382 return -EBADF;
1383
5e762444
AJ
1384 if (hci_blacklist_lookup(hdev, bdaddr))
1385 return -EEXIST;
b2a66aad
AJ
1386
1387 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1388 if (!entry)
1389 return -ENOMEM;
b2a66aad
AJ
1390
1391 bacpy(&entry->bdaddr, bdaddr);
1392
1393 list_add(&entry->list, &hdev->blacklist);
1394
744cf19e 1395 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1396}
1397
1398int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399{
1400 struct bdaddr_list *entry;
b2a66aad 1401
1ec918ce 1402 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1403 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1404
1405 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1406 if (!entry)
5e762444 1407 return -ENOENT;
b2a66aad
AJ
1408
1409 list_del(&entry->list);
1410 kfree(entry);
1411
744cf19e 1412 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1413}
1414
db323f2f 1415static void hci_clear_adv_cache(struct work_struct *work)
35815085 1416{
db323f2f
GP
1417 struct hci_dev *hdev = container_of(work, struct hci_dev,
1418 adv_work.work);
35815085
AG
1419
1420 hci_dev_lock(hdev);
1421
1422 hci_adv_entries_clear(hdev);
1423
1424 hci_dev_unlock(hdev);
1425}
1426
76c8686f
AG
1427int hci_adv_entries_clear(struct hci_dev *hdev)
1428{
1429 struct adv_entry *entry, *tmp;
1430
1431 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1432 list_del(&entry->list);
1433 kfree(entry);
1434 }
1435
1436 BT_DBG("%s adv cache cleared", hdev->name);
1437
1438 return 0;
1439}
1440
1441struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1442{
1443 struct adv_entry *entry;
1444
1445 list_for_each_entry(entry, &hdev->adv_entries, list)
1446 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1447 return entry;
1448
1449 return NULL;
1450}
1451
1452static inline int is_connectable_adv(u8 evt_type)
1453{
1454 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1455 return 1;
1456
1457 return 0;
1458}
1459
1460int hci_add_adv_entry(struct hci_dev *hdev,
1461 struct hci_ev_le_advertising_info *ev)
1462{
1463 struct adv_entry *entry;
1464
1465 if (!is_connectable_adv(ev->evt_type))
1466 return -EINVAL;
1467
1468 /* Only new entries should be added to adv_entries. So, if
1469 * bdaddr was found, don't add it. */
1470 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1471 return 0;
1472
1473 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1474 if (!entry)
1475 return -ENOMEM;
1476
1477 bacpy(&entry->bdaddr, &ev->bdaddr);
1478 entry->bdaddr_type = ev->bdaddr_type;
1479
1480 list_add(&entry->list, &hdev->adv_entries);
1481
1482 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1483 batostr(&entry->bdaddr), entry->bdaddr_type);
1484
1485 return 0;
1486}
1487
1da177e4
LT
1488/* Register HCI device */
1489int hci_register_dev(struct hci_dev *hdev)
1490{
1491 struct list_head *head = &hci_dev_list, *p;
08add513 1492 int i, id, error;
1da177e4 1493
c13854ce
MH
1494 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1495 hdev->bus, hdev->owner);
1da177e4
LT
1496
1497 if (!hdev->open || !hdev->close || !hdev->destruct)
1498 return -EINVAL;
1499
08add513
MM
1500 /* Do not allow HCI_AMP devices to register at index 0,
1501 * so the index can be used as the AMP controller ID.
1502 */
1503 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1504
f20d09d5 1505 write_lock(&hci_dev_list_lock);
1da177e4
LT
1506
1507 /* Find first available device id */
1508 list_for_each(p, &hci_dev_list) {
1509 if (list_entry(p, struct hci_dev, list)->id != id)
1510 break;
1511 head = p; id++;
1512 }
8e87d142 1513
1da177e4
LT
1514 sprintf(hdev->name, "hci%d", id);
1515 hdev->id = id;
c6feeb28 1516 list_add_tail(&hdev->list, head);
1da177e4
LT
1517
1518 atomic_set(&hdev->refcnt, 1);
09fd0de5 1519 mutex_init(&hdev->lock);
1da177e4
LT
1520
1521 hdev->flags = 0;
d23264a8 1522 hdev->dev_flags = 0;
1da177e4 1523 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1524 hdev->esco_type = (ESCO_HV1);
1da177e4 1525 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1526 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1527
04837f64
MH
1528 hdev->idle_timeout = 0;
1529 hdev->sniff_max_interval = 800;
1530 hdev->sniff_min_interval = 80;
1531
b78752cc 1532 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1533 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1534 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1535
1da177e4
LT
1536
1537 skb_queue_head_init(&hdev->rx_q);
1538 skb_queue_head_init(&hdev->cmd_q);
1539 skb_queue_head_init(&hdev->raw_q);
1540
6bd32326
VT
1541 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1542
cd4c5391 1543 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1544 hdev->reassembly[i] = NULL;
1545
1da177e4 1546 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1547 mutex_init(&hdev->req_lock);
1da177e4 1548
30883512 1549 discovery_init(hdev);
1da177e4
LT
1550
1551 hci_conn_hash_init(hdev);
1552
2e58ef3e
JH
1553 INIT_LIST_HEAD(&hdev->mgmt_pending);
1554
ea4bd8ba 1555 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1556
2aeb9a1a
JH
1557 INIT_LIST_HEAD(&hdev->uuids);
1558
55ed8ca1
JH
1559 INIT_LIST_HEAD(&hdev->link_keys);
1560
2763eda6
SJ
1561 INIT_LIST_HEAD(&hdev->remote_oob_data);
1562
76c8686f
AG
1563 INIT_LIST_HEAD(&hdev->adv_entries);
1564
db323f2f 1565 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1566 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1567 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1568
16ab91ab
JH
1569 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1570
1da177e4
LT
1571 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1572
1573 atomic_set(&hdev->promisc, 0);
1574
f20d09d5 1575 write_unlock(&hci_dev_list_lock);
1da177e4 1576
32845eb1
GP
1577 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1578 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1579 if (!hdev->workqueue) {
1580 error = -ENOMEM;
1581 goto err;
1582 }
f48fd9c8 1583
33ca954d
DH
1584 error = hci_add_sysfs(hdev);
1585 if (error < 0)
1586 goto err_wqueue;
1da177e4 1587
611b30f7
MH
1588 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1589 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1590 if (hdev->rfkill) {
1591 if (rfkill_register(hdev->rfkill) < 0) {
1592 rfkill_destroy(hdev->rfkill);
1593 hdev->rfkill = NULL;
1594 }
1595 }
1596
ab81cbf9
JH
1597 set_bit(HCI_AUTO_OFF, &hdev->flags);
1598 set_bit(HCI_SETUP, &hdev->flags);
7f971041 1599 schedule_work(&hdev->power_on);
ab81cbf9 1600
1da177e4
LT
1601 hci_notify(hdev, HCI_DEV_REG);
1602
1603 return id;
f48fd9c8 1604
33ca954d
DH
1605err_wqueue:
1606 destroy_workqueue(hdev->workqueue);
1607err:
f20d09d5 1608 write_lock(&hci_dev_list_lock);
f48fd9c8 1609 list_del(&hdev->list);
f20d09d5 1610 write_unlock(&hci_dev_list_lock);
f48fd9c8 1611
33ca954d 1612 return error;
1da177e4
LT
1613}
1614EXPORT_SYMBOL(hci_register_dev);
1615
1616/* Unregister HCI device */
59735631 1617void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1618{
ef222013
MH
1619 int i;
1620
c13854ce 1621 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1622
f20d09d5 1623 write_lock(&hci_dev_list_lock);
1da177e4 1624 list_del(&hdev->list);
f20d09d5 1625 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1626
1627 hci_dev_do_close(hdev);
1628
cd4c5391 1629 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1630 kfree_skb(hdev->reassembly[i]);
1631
ab81cbf9 1632 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86 1633 !test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 1634 hci_dev_lock(hdev);
744cf19e 1635 mgmt_index_removed(hdev);
09fd0de5 1636 hci_dev_unlock(hdev);
56e5cb86 1637 }
ab81cbf9 1638
2e58ef3e
JH
1639 /* mgmt_index_removed should take care of emptying the
1640 * pending list */
1641 BUG_ON(!list_empty(&hdev->mgmt_pending));
1642
1da177e4
LT
1643 hci_notify(hdev, HCI_DEV_UNREG);
1644
611b30f7
MH
1645 if (hdev->rfkill) {
1646 rfkill_unregister(hdev->rfkill);
1647 rfkill_destroy(hdev->rfkill);
1648 }
1649
ce242970 1650 hci_del_sysfs(hdev);
147e2d59 1651
db323f2f 1652 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1653
f48fd9c8
MH
1654 destroy_workqueue(hdev->workqueue);
1655
09fd0de5 1656 hci_dev_lock(hdev);
e2e0cacb 1657 hci_blacklist_clear(hdev);
2aeb9a1a 1658 hci_uuids_clear(hdev);
55ed8ca1 1659 hci_link_keys_clear(hdev);
2763eda6 1660 hci_remote_oob_data_clear(hdev);
76c8686f 1661 hci_adv_entries_clear(hdev);
09fd0de5 1662 hci_dev_unlock(hdev);
e2e0cacb 1663
1da177e4 1664 __hci_dev_put(hdev);
1da177e4
LT
1665}
1666EXPORT_SYMBOL(hci_unregister_dev);
1667
1668/* Suspend HCI device */
1669int hci_suspend_dev(struct hci_dev *hdev)
1670{
1671 hci_notify(hdev, HCI_DEV_SUSPEND);
1672 return 0;
1673}
1674EXPORT_SYMBOL(hci_suspend_dev);
1675
1676/* Resume HCI device */
1677int hci_resume_dev(struct hci_dev *hdev)
1678{
1679 hci_notify(hdev, HCI_DEV_RESUME);
1680 return 0;
1681}
1682EXPORT_SYMBOL(hci_resume_dev);
1683
76bca880
MH
1684/* Receive frame from HCI drivers */
1685int hci_recv_frame(struct sk_buff *skb)
1686{
1687 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1688 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1689 && !test_bit(HCI_INIT, &hdev->flags))) {
1690 kfree_skb(skb);
1691 return -ENXIO;
1692 }
1693
1694 /* Incomming skb */
1695 bt_cb(skb)->incoming = 1;
1696
1697 /* Time stamp */
1698 __net_timestamp(skb);
1699
76bca880 1700 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1701 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1702
76bca880
MH
1703 return 0;
1704}
1705EXPORT_SYMBOL(hci_recv_frame);
1706
33e882a5 1707static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1708 int count, __u8 index)
33e882a5
SS
1709{
1710 int len = 0;
1711 int hlen = 0;
1712 int remain = count;
1713 struct sk_buff *skb;
1714 struct bt_skb_cb *scb;
1715
1716 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1717 index >= NUM_REASSEMBLY)
1718 return -EILSEQ;
1719
1720 skb = hdev->reassembly[index];
1721
1722 if (!skb) {
1723 switch (type) {
1724 case HCI_ACLDATA_PKT:
1725 len = HCI_MAX_FRAME_SIZE;
1726 hlen = HCI_ACL_HDR_SIZE;
1727 break;
1728 case HCI_EVENT_PKT:
1729 len = HCI_MAX_EVENT_SIZE;
1730 hlen = HCI_EVENT_HDR_SIZE;
1731 break;
1732 case HCI_SCODATA_PKT:
1733 len = HCI_MAX_SCO_SIZE;
1734 hlen = HCI_SCO_HDR_SIZE;
1735 break;
1736 }
1737
1e429f38 1738 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1739 if (!skb)
1740 return -ENOMEM;
1741
1742 scb = (void *) skb->cb;
1743 scb->expect = hlen;
1744 scb->pkt_type = type;
1745
1746 skb->dev = (void *) hdev;
1747 hdev->reassembly[index] = skb;
1748 }
1749
1750 while (count) {
1751 scb = (void *) skb->cb;
1752 len = min(scb->expect, (__u16)count);
1753
1754 memcpy(skb_put(skb, len), data, len);
1755
1756 count -= len;
1757 data += len;
1758 scb->expect -= len;
1759 remain = count;
1760
1761 switch (type) {
1762 case HCI_EVENT_PKT:
1763 if (skb->len == HCI_EVENT_HDR_SIZE) {
1764 struct hci_event_hdr *h = hci_event_hdr(skb);
1765 scb->expect = h->plen;
1766
1767 if (skb_tailroom(skb) < scb->expect) {
1768 kfree_skb(skb);
1769 hdev->reassembly[index] = NULL;
1770 return -ENOMEM;
1771 }
1772 }
1773 break;
1774
1775 case HCI_ACLDATA_PKT:
1776 if (skb->len == HCI_ACL_HDR_SIZE) {
1777 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1778 scb->expect = __le16_to_cpu(h->dlen);
1779
1780 if (skb_tailroom(skb) < scb->expect) {
1781 kfree_skb(skb);
1782 hdev->reassembly[index] = NULL;
1783 return -ENOMEM;
1784 }
1785 }
1786 break;
1787
1788 case HCI_SCODATA_PKT:
1789 if (skb->len == HCI_SCO_HDR_SIZE) {
1790 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1791 scb->expect = h->dlen;
1792
1793 if (skb_tailroom(skb) < scb->expect) {
1794 kfree_skb(skb);
1795 hdev->reassembly[index] = NULL;
1796 return -ENOMEM;
1797 }
1798 }
1799 break;
1800 }
1801
1802 if (scb->expect == 0) {
1803 /* Complete frame */
1804
1805 bt_cb(skb)->pkt_type = type;
1806 hci_recv_frame(skb);
1807
1808 hdev->reassembly[index] = NULL;
1809 return remain;
1810 }
1811 }
1812
1813 return remain;
1814}
1815
ef222013
MH
1816int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1817{
f39a3c06
SS
1818 int rem = 0;
1819
ef222013
MH
1820 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1821 return -EILSEQ;
1822
da5f6c37 1823 while (count) {
1e429f38 1824 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1825 if (rem < 0)
1826 return rem;
ef222013 1827
f39a3c06
SS
1828 data += (count - rem);
1829 count = rem;
f81c6224 1830 }
ef222013 1831
f39a3c06 1832 return rem;
ef222013
MH
1833}
1834EXPORT_SYMBOL(hci_recv_fragment);
1835
99811510
SS
1836#define STREAM_REASSEMBLY 0
1837
1838int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1839{
1840 int type;
1841 int rem = 0;
1842
da5f6c37 1843 while (count) {
99811510
SS
1844 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1845
1846 if (!skb) {
1847 struct { char type; } *pkt;
1848
1849 /* Start of the frame */
1850 pkt = data;
1851 type = pkt->type;
1852
1853 data++;
1854 count--;
1855 } else
1856 type = bt_cb(skb)->pkt_type;
1857
1e429f38
GP
1858 rem = hci_reassembly(hdev, type, data, count,
1859 STREAM_REASSEMBLY);
99811510
SS
1860 if (rem < 0)
1861 return rem;
1862
1863 data += (count - rem);
1864 count = rem;
f81c6224 1865 }
99811510
SS
1866
1867 return rem;
1868}
1869EXPORT_SYMBOL(hci_recv_stream_fragment);
1870
1da177e4
LT
1871/* ---- Interface to upper protocols ---- */
1872
1da177e4
LT
1873int hci_register_cb(struct hci_cb *cb)
1874{
1875 BT_DBG("%p name %s", cb, cb->name);
1876
f20d09d5 1877 write_lock(&hci_cb_list_lock);
1da177e4 1878 list_add(&cb->list, &hci_cb_list);
f20d09d5 1879 write_unlock(&hci_cb_list_lock);
1da177e4
LT
1880
1881 return 0;
1882}
1883EXPORT_SYMBOL(hci_register_cb);
1884
1885int hci_unregister_cb(struct hci_cb *cb)
1886{
1887 BT_DBG("%p name %s", cb, cb->name);
1888
f20d09d5 1889 write_lock(&hci_cb_list_lock);
1da177e4 1890 list_del(&cb->list);
f20d09d5 1891 write_unlock(&hci_cb_list_lock);
1da177e4
LT
1892
1893 return 0;
1894}
1895EXPORT_SYMBOL(hci_unregister_cb);
1896
1897static int hci_send_frame(struct sk_buff *skb)
1898{
1899 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1900
1901 if (!hdev) {
1902 kfree_skb(skb);
1903 return -ENODEV;
1904 }
1905
0d48d939 1906 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1907
1908 if (atomic_read(&hdev->promisc)) {
1909 /* Time stamp */
a61bbcf2 1910 __net_timestamp(skb);
1da177e4 1911
eec8d2bc 1912 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1913 }
1914
1915 /* Get rid of skb owner, prior to sending to the driver. */
1916 skb_orphan(skb);
1917
1918 return hdev->send(skb);
1919}
1920
1921/* Send HCI command */
a9de9248 1922int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1923{
1924 int len = HCI_COMMAND_HDR_SIZE + plen;
1925 struct hci_command_hdr *hdr;
1926 struct sk_buff *skb;
1927
a9de9248 1928 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1929
1930 skb = bt_skb_alloc(len, GFP_ATOMIC);
1931 if (!skb) {
ef222013 1932 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1933 return -ENOMEM;
1934 }
1935
1936 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1937 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1938 hdr->plen = plen;
1939
1940 if (plen)
1941 memcpy(skb_put(skb, plen), param, plen);
1942
1943 BT_DBG("skb len %d", skb->len);
1944
0d48d939 1945 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1946 skb->dev = (void *) hdev;
c78ae283 1947
a5040efa
JH
1948 if (test_bit(HCI_INIT, &hdev->flags))
1949 hdev->init_last_cmd = opcode;
1950
1da177e4 1951 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1952 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1953
1954 return 0;
1955}
1da177e4
LT
1956
1957/* Get data from the previously sent command */
a9de9248 1958void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1959{
1960 struct hci_command_hdr *hdr;
1961
1962 if (!hdev->sent_cmd)
1963 return NULL;
1964
1965 hdr = (void *) hdev->sent_cmd->data;
1966
a9de9248 1967 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1968 return NULL;
1969
a9de9248 1970 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1971
1972 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1973}
1974
1975/* Send ACL data */
1976static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1977{
1978 struct hci_acl_hdr *hdr;
1979 int len = skb->len;
1980
badff6d0
ACM
1981 skb_push(skb, HCI_ACL_HDR_SIZE);
1982 skb_reset_transport_header(skb);
9c70220b 1983 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1984 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1985 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1986}
1987
73d80deb
LAD
1988static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1989 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1990{
1991 struct hci_dev *hdev = conn->hdev;
1992 struct sk_buff *list;
1993
70f23020
AE
1994 list = skb_shinfo(skb)->frag_list;
1995 if (!list) {
1da177e4
LT
1996 /* Non fragmented */
1997 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1998
73d80deb 1999 skb_queue_tail(queue, skb);
1da177e4
LT
2000 } else {
2001 /* Fragmented */
2002 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2003
2004 skb_shinfo(skb)->frag_list = NULL;
2005
2006 /* Queue all fragments atomically */
af3e6359 2007 spin_lock(&queue->lock);
1da177e4 2008
73d80deb 2009 __skb_queue_tail(queue, skb);
e702112f
AE
2010
2011 flags &= ~ACL_START;
2012 flags |= ACL_CONT;
1da177e4
LT
2013 do {
2014 skb = list; list = list->next;
8e87d142 2015
1da177e4 2016 skb->dev = (void *) hdev;
0d48d939 2017 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2018 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2019
2020 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2021
73d80deb 2022 __skb_queue_tail(queue, skb);
1da177e4
LT
2023 } while (list);
2024
af3e6359 2025 spin_unlock(&queue->lock);
1da177e4 2026 }
73d80deb
LAD
2027}
2028
2029void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2030{
2031 struct hci_conn *conn = chan->conn;
2032 struct hci_dev *hdev = conn->hdev;
2033
2034 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2035
2036 skb->dev = (void *) hdev;
2037 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2038 hci_add_acl_hdr(skb, conn->handle, flags);
2039
2040 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2041
3eff45ea 2042 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2043}
2044EXPORT_SYMBOL(hci_send_acl);
2045
2046/* Send SCO data */
0d861d8b 2047void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2048{
2049 struct hci_dev *hdev = conn->hdev;
2050 struct hci_sco_hdr hdr;
2051
2052 BT_DBG("%s len %d", hdev->name, skb->len);
2053
aca3192c 2054 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2055 hdr.dlen = skb->len;
2056
badff6d0
ACM
2057 skb_push(skb, HCI_SCO_HDR_SIZE);
2058 skb_reset_transport_header(skb);
9c70220b 2059 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2060
2061 skb->dev = (void *) hdev;
0d48d939 2062 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2063
1da177e4 2064 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2065 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2066}
2067EXPORT_SYMBOL(hci_send_sco);
2068
2069/* ---- HCI TX task (outgoing data) ---- */
2070
2071/* HCI Connection scheduler */
2072static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2073{
2074 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2075 struct hci_conn *conn = NULL, *c;
1da177e4 2076 int num = 0, min = ~0;
1da177e4 2077
8e87d142 2078 /* We don't have to lock device here. Connections are always
1da177e4 2079 * added and removed with TX task disabled. */
bf4c6325
GP
2080
2081 rcu_read_lock();
2082
2083 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2084 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2085 continue;
769be974
MH
2086
2087 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2088 continue;
2089
1da177e4
LT
2090 num++;
2091
2092 if (c->sent < min) {
2093 min = c->sent;
2094 conn = c;
2095 }
52087a79
LAD
2096
2097 if (hci_conn_num(hdev, type) == num)
2098 break;
1da177e4
LT
2099 }
2100
bf4c6325
GP
2101 rcu_read_unlock();
2102
1da177e4 2103 if (conn) {
6ed58ec5
VT
2104 int cnt, q;
2105
2106 switch (conn->type) {
2107 case ACL_LINK:
2108 cnt = hdev->acl_cnt;
2109 break;
2110 case SCO_LINK:
2111 case ESCO_LINK:
2112 cnt = hdev->sco_cnt;
2113 break;
2114 case LE_LINK:
2115 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2116 break;
2117 default:
2118 cnt = 0;
2119 BT_ERR("Unknown link type");
2120 }
2121
2122 q = cnt / num;
1da177e4
LT
2123 *quote = q ? q : 1;
2124 } else
2125 *quote = 0;
2126
2127 BT_DBG("conn %p quote %d", conn, *quote);
2128 return conn;
2129}
2130
bae1f5d9 2131static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2132{
2133 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2134 struct hci_conn *c;
1da177e4 2135
bae1f5d9 2136 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2137
bf4c6325
GP
2138 rcu_read_lock();
2139
1da177e4 2140 /* Kill stalled connections */
bf4c6325 2141 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2142 if (c->type == type && c->sent) {
2143 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2144 hdev->name, batostr(&c->dst));
2145 hci_acl_disconn(c, 0x13);
2146 }
2147 }
bf4c6325
GP
2148
2149 rcu_read_unlock();
1da177e4
LT
2150}
2151
73d80deb
LAD
2152static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2153 int *quote)
1da177e4 2154{
73d80deb
LAD
2155 struct hci_conn_hash *h = &hdev->conn_hash;
2156 struct hci_chan *chan = NULL;
2157 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2158 struct hci_conn *conn;
73d80deb
LAD
2159 int cnt, q, conn_num = 0;
2160
2161 BT_DBG("%s", hdev->name);
2162
bf4c6325
GP
2163 rcu_read_lock();
2164
2165 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2166 struct hci_chan *tmp;
2167
2168 if (conn->type != type)
2169 continue;
2170
2171 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2172 continue;
2173
2174 conn_num++;
2175
8192edef 2176 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2177 struct sk_buff *skb;
2178
2179 if (skb_queue_empty(&tmp->data_q))
2180 continue;
2181
2182 skb = skb_peek(&tmp->data_q);
2183 if (skb->priority < cur_prio)
2184 continue;
2185
2186 if (skb->priority > cur_prio) {
2187 num = 0;
2188 min = ~0;
2189 cur_prio = skb->priority;
2190 }
2191
2192 num++;
2193
2194 if (conn->sent < min) {
2195 min = conn->sent;
2196 chan = tmp;
2197 }
2198 }
2199
2200 if (hci_conn_num(hdev, type) == conn_num)
2201 break;
2202 }
2203
bf4c6325
GP
2204 rcu_read_unlock();
2205
73d80deb
LAD
2206 if (!chan)
2207 return NULL;
2208
2209 switch (chan->conn->type) {
2210 case ACL_LINK:
2211 cnt = hdev->acl_cnt;
2212 break;
2213 case SCO_LINK:
2214 case ESCO_LINK:
2215 cnt = hdev->sco_cnt;
2216 break;
2217 case LE_LINK:
2218 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2219 break;
2220 default:
2221 cnt = 0;
2222 BT_ERR("Unknown link type");
2223 }
2224
2225 q = cnt / num;
2226 *quote = q ? q : 1;
2227 BT_DBG("chan %p quote %d", chan, *quote);
2228 return chan;
2229}
2230
02b20f0b
LAD
2231static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2232{
2233 struct hci_conn_hash *h = &hdev->conn_hash;
2234 struct hci_conn *conn;
2235 int num = 0;
2236
2237 BT_DBG("%s", hdev->name);
2238
bf4c6325
GP
2239 rcu_read_lock();
2240
2241 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2242 struct hci_chan *chan;
2243
2244 if (conn->type != type)
2245 continue;
2246
2247 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2248 continue;
2249
2250 num++;
2251
8192edef 2252 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2253 struct sk_buff *skb;
2254
2255 if (chan->sent) {
2256 chan->sent = 0;
2257 continue;
2258 }
2259
2260 if (skb_queue_empty(&chan->data_q))
2261 continue;
2262
2263 skb = skb_peek(&chan->data_q);
2264 if (skb->priority >= HCI_PRIO_MAX - 1)
2265 continue;
2266
2267 skb->priority = HCI_PRIO_MAX - 1;
2268
2269 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2270 skb->priority);
2271 }
2272
2273 if (hci_conn_num(hdev, type) == num)
2274 break;
2275 }
bf4c6325
GP
2276
2277 rcu_read_unlock();
2278
02b20f0b
LAD
2279}
2280
73d80deb
LAD
2281static inline void hci_sched_acl(struct hci_dev *hdev)
2282{
2283 struct hci_chan *chan;
1da177e4
LT
2284 struct sk_buff *skb;
2285 int quote;
73d80deb 2286 unsigned int cnt;
1da177e4
LT
2287
2288 BT_DBG("%s", hdev->name);
2289
52087a79
LAD
2290 if (!hci_conn_num(hdev, ACL_LINK))
2291 return;
2292
1da177e4
LT
2293 if (!test_bit(HCI_RAW, &hdev->flags)) {
2294 /* ACL tx timeout must be longer than maximum
2295 * link supervision timeout (40.9 seconds) */
82453021 2296 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2297 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2298 }
2299
73d80deb 2300 cnt = hdev->acl_cnt;
04837f64 2301
73d80deb
LAD
2302 while (hdev->acl_cnt &&
2303 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2304 u32 priority = (skb_peek(&chan->data_q))->priority;
2305 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2306 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2307 skb->len, skb->priority);
2308
ec1cce24
LAD
2309 /* Stop if priority has changed */
2310 if (skb->priority < priority)
2311 break;
2312
2313 skb = skb_dequeue(&chan->data_q);
2314
73d80deb
LAD
2315 hci_conn_enter_active_mode(chan->conn,
2316 bt_cb(skb)->force_active);
04837f64 2317
1da177e4
LT
2318 hci_send_frame(skb);
2319 hdev->acl_last_tx = jiffies;
2320
2321 hdev->acl_cnt--;
73d80deb
LAD
2322 chan->sent++;
2323 chan->conn->sent++;
1da177e4
LT
2324 }
2325 }
02b20f0b
LAD
2326
2327 if (cnt != hdev->acl_cnt)
2328 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2329}
2330
2331/* Schedule SCO */
2332static inline void hci_sched_sco(struct hci_dev *hdev)
2333{
2334 struct hci_conn *conn;
2335 struct sk_buff *skb;
2336 int quote;
2337
2338 BT_DBG("%s", hdev->name);
2339
52087a79
LAD
2340 if (!hci_conn_num(hdev, SCO_LINK))
2341 return;
2342
1da177e4
LT
2343 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2344 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2345 BT_DBG("skb %p len %d", skb, skb->len);
2346 hci_send_frame(skb);
2347
2348 conn->sent++;
2349 if (conn->sent == ~0)
2350 conn->sent = 0;
2351 }
2352 }
2353}
2354
b6a0dc82
MH
2355static inline void hci_sched_esco(struct hci_dev *hdev)
2356{
2357 struct hci_conn *conn;
2358 struct sk_buff *skb;
2359 int quote;
2360
2361 BT_DBG("%s", hdev->name);
2362
52087a79
LAD
2363 if (!hci_conn_num(hdev, ESCO_LINK))
2364 return;
2365
b6a0dc82
MH
2366 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2367 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2368 BT_DBG("skb %p len %d", skb, skb->len);
2369 hci_send_frame(skb);
2370
2371 conn->sent++;
2372 if (conn->sent == ~0)
2373 conn->sent = 0;
2374 }
2375 }
2376}
2377
6ed58ec5
VT
/* LE TX scheduler: drain queued LE data, mirroring hci_sched_acl().
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) share
 * the ACL buffer pool, so the budget is taken from — and written back
 * to — whichever counter applies.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	/* Nothing to schedule if there are no LE links */
	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the budget: dedicated LE pool if present, else the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb; only skbs of at least this
		 * priority are sent from this channel in this pass. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually take the skb we only peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2428
3eff45ea 2429static void hci_tx_work(struct work_struct *work)
1da177e4 2430{
3eff45ea 2431 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2432 struct sk_buff *skb;
2433
6ed58ec5
VT
2434 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2435 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2436
2437 /* Schedule queues and send stuff to HCI driver */
2438
2439 hci_sched_acl(hdev);
2440
2441 hci_sched_sco(hdev);
2442
b6a0dc82
MH
2443 hci_sched_esco(hdev);
2444
6ed58ec5
VT
2445 hci_sched_le(hdev);
2446
1da177e4
LT
2447 /* Send next queued raw (unknown type) packet */
2448 while ((skb = skb_dequeue(&hdev->raw_q)))
2449 hci_send_frame(skb);
1da177e4
LT
2450}
2451
25985edc 2452/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2453
2454/* ACL data packet */
2455static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2456{
2457 struct hci_acl_hdr *hdr = (void *) skb->data;
2458 struct hci_conn *conn;
2459 __u16 handle, flags;
2460
2461 skb_pull(skb, HCI_ACL_HDR_SIZE);
2462
2463 handle = __le16_to_cpu(hdr->handle);
2464 flags = hci_flags(handle);
2465 handle = hci_handle(handle);
2466
2467 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2468
2469 hdev->stat.acl_rx++;
2470
2471 hci_dev_lock(hdev);
2472 conn = hci_conn_hash_lookup_handle(hdev, handle);
2473 hci_dev_unlock(hdev);
8e87d142 2474
1da177e4 2475 if (conn) {
65983fc7 2476 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2477
1da177e4 2478 /* Send to upper protocol */
686ebf28
UF
2479 l2cap_recv_acldata(conn, skb, flags);
2480 return;
1da177e4 2481 } else {
8e87d142 2482 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2483 hdev->name, handle);
2484 }
2485
2486 kfree_skb(skb);
2487}
2488
2489/* SCO data packet */
2490static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2491{
2492 struct hci_sco_hdr *hdr = (void *) skb->data;
2493 struct hci_conn *conn;
2494 __u16 handle;
2495
2496 skb_pull(skb, HCI_SCO_HDR_SIZE);
2497
2498 handle = __le16_to_cpu(hdr->handle);
2499
2500 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2501
2502 hdev->stat.sco_rx++;
2503
2504 hci_dev_lock(hdev);
2505 conn = hci_conn_hash_lookup_handle(hdev, handle);
2506 hci_dev_unlock(hdev);
2507
2508 if (conn) {
1da177e4 2509 /* Send to upper protocol */
686ebf28
UF
2510 sco_recv_scodata(conn, skb);
2511 return;
1da177e4 2512 } else {
8e87d142 2513 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2514 hdev->name, handle);
2515 }
2516
2517 kfree_skb(skb);
2518}
2519
/* RX work handler: drains hdev->rx_q and dispatches each packet.
 * Order matters: promiscuous sockets get a copy first, then RAW-mode
 * packets are dropped, then data packets are discarded while the device
 * is still initializing, and only then is the frame processed. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack does not process packets itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to complete init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2571
/* Command work handler: transmits the next queued HCI command when the
 * controller's command credit (hdev->cmd_cnt) allows it. A clone of the
 * command is kept in hdev->sent_cmd; on clone failure the command is
 * requeued at the head and the work rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command, if any */
		kfree_skb(hdev->sent_cmd);

		/* Keep a copy of the command being sent */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight;
			 * otherwise (re)arm the command timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2602
2603int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2604{
2605 /* General inquiry access code (GIAC) */
2606 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2607 struct hci_cp_inquiry cp;
2608
2609 BT_DBG("%s", hdev->name);
2610
2611 if (test_bit(HCI_INQUIRY, &hdev->flags))
2612 return -EINPROGRESS;
2613
4663262c
JH
2614 inquiry_cache_flush(hdev);
2615
2519a1fc
AG
2616 memset(&cp, 0, sizeof(cp));
2617 memcpy(&cp.lap, lap, sizeof(cp.lap));
2618 cp.length = length;
2619
2620 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2621}
023d5049
AG
2622
2623int hci_cancel_inquiry(struct hci_dev *hdev)
2624{
2625 BT_DBG("%s", hdev->name);
2626
2627 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2628 return -EPERM;
2629
2630 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2631}
7784d78f
AE
2632
/* Module parameter: toggles High Speed support at load time and via
 * sysfs (mode 0644; enable_hs is defined earlier in this file). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");