Bluetooth: Split ctrl init to BREDR and AMP parts
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
7784d78f
AE
58int enable_hs;
59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
67d0dfb5 64static DEFINE_MUTEX(hci_task_lock);
1da177e4
LT
65
66/* HCI device list */
67LIST_HEAD(hci_dev_list);
68DEFINE_RWLOCK(hci_dev_list_lock);
69
70/* HCI callback list */
71LIST_HEAD(hci_cb_list);
72DEFINE_RWLOCK(hci_cb_list_lock);
73
74/* HCI protocols */
75#define HCI_MAX_PROTO 2
76struct hci_proto *hci_proto[HCI_MAX_PROTO];
77
78/* HCI notifiers list */
e041c683 79static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
80
81/* ---- HCI notifications ---- */
82
/* Register a notifier to receive HCI device events (add/remove, up/down).
 * Thin wrapper over the atomic notifier chain; safe from atomic context. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
87
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
92
/* Broadcast @event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) for @hdev to all
 * registered notifiers on the atomic chain. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
97
98/* ---- HCI requests ---- */
99
23bb5763 100void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 101{
23bb5763
JH
102 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
a5040efa
JH
104 /* If this is the init phase check if the completed command matches
105 * the last init command, and if not just return.
106 */
107 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 108 return;
1da177e4
LT
109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = result;
112 hdev->req_status = HCI_REQ_DONE;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117static void hci_req_cancel(struct hci_dev *hdev, int err)
118{
119 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
125 }
126}
127
128/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps interruptibly until
 * hci_req_complete()/hci_req_cancel() fires or @timeout jiffies elapse.
 * Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno mapped from the HCI status on
 * failure, -EINTR if interrupted by a signal, -ETIMEDOUT on timeout.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Must be on the wait queue and INTERRUPTIBLE *before* issuing the
	 * request, or a fast completion could be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status == HCI_REQ_PEND;
	 * presumably a later completion harmlessly overwrites it — confirm. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: translate HCI status to errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
170
171static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 172 unsigned long opt, __u32 timeout)
1da177e4
LT
173{
174 int ret;
175
7c6a329e
MH
176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
1da177e4
LT
179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
/* Request callback: issue HCI_Reset. HCI_RESET is set first so the event
 * path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
/* Initialization command sequence for BR/EDR controllers, run under
 * HCI_INIT from hci_init_req(). The commands are queued in order; the
 * final one's completion ends the init request (see hci_req_complete). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Mandatory initialization */

	/* Reset, unless the driver declares the controller mustn't be reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Purge all link keys stored on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
e61ef499
AE
/* Minimal init sequence for AMP (alternate MAC/PHY) controllers:
 * only a reset and a local-version query are required. */
static void amp_init(struct hci_dev *hdev)
{
	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
254
255static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
256{
257 struct sk_buff *skb;
258
259 BT_DBG("%s %ld", hdev->name, opt);
260
261 /* Driver initialization */
262
263 /* Special commands */
264 while ((skb = skb_dequeue(&hdev->driver_init))) {
265 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
266 skb->dev = (void *) hdev;
267
268 skb_queue_tail(&hdev->cmd_q, skb);
269 queue_work(hdev->workqueue, &hdev->cmd_work);
270 }
271 skb_queue_purge(&hdev->driver_init);
272
273 switch (hdev->dev_type) {
274 case HCI_BREDR:
275 bredr_init(hdev);
276 break;
277
278 case HCI_AMP:
279 amp_init(hdev);
280 break;
281
282 default:
283 BT_ERR("Unknown device type %d", hdev->dev_type);
284 break;
285 }
286
287}
288
6ed58ec5
VT
/* Request callback for LE-capable controllers: query the LE ACL buffer
 * size so the TX scheduler knows the LE credit pool. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
296
1da177e4
LT
/* Request callback: enable/disable inquiry and page scan.
 * @opt carries the scan-enable bitmask (SCAN_INQUIRY/SCAN_PAGE). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
306
/* Request callback: switch link-level authentication on/off.
 * @opt is the authentication-enable value. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
316
/* Request callback: switch link-level encryption on/off.
 * @opt is the encryption-mode value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
326
e4e8e37c
MH
/* Request callback: set the controller's default link policy
 * (role switch / hold / sniff / park bits), passed in @opt. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
336
8e87d142 337/* Get HCI device by index.
1da177e4
LT
338 * Device is held on return. */
339struct hci_dev *hci_dev_get(int index)
340{
8035ded4 341 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
342
343 BT_DBG("%d", index);
344
345 if (index < 0)
346 return NULL;
347
348 read_lock(&hci_dev_list_lock);
8035ded4 349 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
350 if (d->id == index) {
351 hdev = hci_dev_hold(d);
352 break;
353 }
354 }
355 read_unlock(&hci_dev_list_lock);
356 return hdev;
357}
1da177e4
LT
358
359/* ---- Inquiry support ---- */
360static void inquiry_cache_flush(struct hci_dev *hdev)
361{
362 struct inquiry_cache *cache = &hdev->inq_cache;
363 struct inquiry_entry *next = cache->list, *e;
364
365 BT_DBG("cache %p", cache);
366
367 cache->list = NULL;
368 while ((e = next)) {
369 next = e->next;
370 kfree(e);
371 }
372}
373
374struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
375{
376 struct inquiry_cache *cache = &hdev->inq_cache;
377 struct inquiry_entry *e;
378
379 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
380
381 for (e = cache->list; e; e = e->next)
382 if (!bacmp(&e->data.bdaddr, bdaddr))
383 break;
384 return e;
385}
386
387void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
388{
389 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 390 struct inquiry_entry *ie;
1da177e4
LT
391
392 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
393
70f23020
AE
394 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
395 if (!ie) {
1da177e4 396 /* Entry not in the cache. Add new one. */
70f23020
AE
397 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
398 if (!ie)
1da177e4 399 return;
70f23020
AE
400
401 ie->next = cache->list;
402 cache->list = ie;
1da177e4
LT
403 }
404
70f23020
AE
405 memcpy(&ie->data, data, sizeof(*data));
406 ie->timestamp = jiffies;
1da177e4
LT
407 cache->timestamp = jiffies;
408}
409
410static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
411{
412 struct inquiry_cache *cache = &hdev->inq_cache;
413 struct inquiry_info *info = (struct inquiry_info *) buf;
414 struct inquiry_entry *e;
415 int copied = 0;
416
417 for (e = cache->list; e && copied < num; e = e->next, copied++) {
418 struct inquiry_data *data = &e->data;
419 bacpy(&info->bdaddr, &data->bdaddr);
420 info->pscan_rep_mode = data->pscan_rep_mode;
421 info->pscan_period_mode = data->pscan_period_mode;
422 info->pscan_mode = data->pscan_mode;
423 memcpy(info->dev_class, data->dev_class, 3);
424 info->clock_offset = data->clock_offset;
425 info++;
426 }
427
428 BT_DBG("cache %p, copied %d", cache, copied);
429 return copied;
430}
431
/* Request callback: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via @opt. No-op if an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
448
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale/empty or the caller asked for a flush), then copy the
 * cached results back to userspace after the request header.
 *
 * Returns 0 on success or a negative errno (-EFAULT/-ENODEV/-ENOMEM or
 * an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the dev lock whether a new inquiry is needed */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per spec; ~2s of jiffies per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated header, then the result array after it */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
514
515/* ---- HCI ioctl helpers ---- */
516
/* Bring HCI device @dev up: open the transport, run the init command
 * sequence (unless the device is raw), and on success mark it HCI_UP and
 * notify listeners/mgmt. On init failure the transport is torn back down.
 *
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY, -EIO, or an error
 * propagated from the init requests.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on a radio that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the driver transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* One command credit until the controller tells us more */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't tell mgmt "powered" while initial setup is pending */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
603
/* Take @hdev down: cancel pending work and timers, flush caches and
 * connections, reset the controller (unless raw), drain all queues and
 * close the transport. Safe to call on an already-down device.
 * The teardown order below is deliberate — do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
683
684int hci_dev_close(__u16 dev)
685{
686 struct hci_dev *hdev;
687 int err;
688
70f23020
AE
689 hdev = hci_dev_get(dev);
690 if (!hdev)
1da177e4
LT
691 return -ENODEV;
692 err = hci_dev_do_close(hdev);
693 hci_dev_put(hdev);
694 return err;
695}
696
/* HCIDEVRESET ioctl backend: drop all queued traffic and connections on
 * an up device, flush the driver, restore command credit and send
 * HCI_Reset (unless raw). No-op (ret 0) if the device is down. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single init command credit; zero all data credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
735
736int hci_dev_reset_stat(__u16 dev)
737{
738 struct hci_dev *hdev;
739 int ret = 0;
740
70f23020
AE
741 hdev = hci_dev_get(dev);
742 if (!hdev)
1da177e4
LT
743 return -ENODEV;
744
745 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
746
747 hci_dev_put(hdev);
748
749 return ret;
750}
751
/* Dispatcher for the HCISET* device-control ioctls. Reads a
 * struct hci_dev_req from userspace and either issues the matching HCI
 * request or updates local device settings directly.
 *
 * Returns 0 on success, -EFAULT/-ENODEV/-EOPNOTSUPP/-EINVAL or an error
 * from hci_request().
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Local setting only; no command goes to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs mtu in the high __u16, pkt count in the low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
826
/* HCIGETDEVLIST ioctl backend: fill a caller-sized list of (dev_id,
 * flags) pairs for all registered devices. As a side effect, pending
 * auto-power-off is cancelled and non-mgmt devices are marked pairable.
 *
 * Returns 0, -EFAULT, -EINVAL (bad count) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is taking over: stop the pending auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
873
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * one device. Like hci_get_dev_list(), it also cancels auto-power-off
 * and marks non-mgmt devices pairable. Returns 0/-EFAULT/-ENODEV. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
915
916/* ---- Interface to HCI drivers ---- */
917
611b30f7
MH
918static int hci_rfkill_set_block(void *data, bool blocked)
919{
920 struct hci_dev *hdev = data;
921
922 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
923
924 if (!blocked)
925 return 0;
926
927 hci_dev_do_close(hdev);
928
929 return 0;
930}
931
/* rfkill operations registered for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
935
1da177e4
LT
936/* Alloc HCI device */
/* Alloc HCI device.
 * Allocates a zeroed struct hci_dev, initializes its sysfs device and
 * the driver_init skb queue. Returns NULL on allocation failure;
 * release with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
951
952/* Free HCI device */
/* Free HCI device.
 * Purges any unsent driver-init commands and drops the embedded device
 * reference; the struct itself is freed by the device release hook. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
961
ab81cbf9
JH
/* Deferred power-on worker: opens the device and, while HCI_AUTO_OFF is
 * set, schedules an automatic power-off in case userspace never claims
 * the device. Tells mgmt when initial setup completes. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
978
/* Deferred power-off worker (scheduled by hci_power_on when nothing
 * claimed the device): clears HCI_AUTO_OFF and closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
990
16ab91ab
JH
/* Delayed worker ending a timed discoverable period: drop back to page
 * scan only (inquiry scan off) and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1008
2aeb9a1a
JH
1009int hci_uuids_clear(struct hci_dev *hdev)
1010{
1011 struct list_head *p, *n;
1012
1013 list_for_each_safe(p, n, &hdev->uuids) {
1014 struct bt_uuid *uuid;
1015
1016 uuid = list_entry(p, struct bt_uuid, list);
1017
1018 list_del(p);
1019 kfree(uuid);
1020 }
1021
1022 return 0;
1023}
1024
55ed8ca1
JH
1025int hci_link_keys_clear(struct hci_dev *hdev)
1026{
1027 struct list_head *p, *n;
1028
1029 list_for_each_safe(p, n, &hdev->link_keys) {
1030 struct link_key *key;
1031
1032 key = list_entry(p, struct link_key, list);
1033
1034 list_del(p);
1035 kfree(key);
1036 }
1037
1038 return 0;
1039}
1040
1041struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1042{
8035ded4 1043 struct link_key *k;
55ed8ca1 1044
8035ded4 1045 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1046 if (bacmp(bdaddr, &k->bdaddr) == 0)
1047 return k;
55ed8ca1
JH
1048
1049 return NULL;
1050}
1051
d25e28ab
JH
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to keep the key, 0 to discard it after use.
 * The order of these guard clauses is significant — do not reorder. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1087
75d262c2
VCG
1088struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1089{
1090 struct link_key *k;
1091
1092 list_for_each_entry(k, &hdev->link_keys, list) {
1093 struct key_master_id *id;
1094
1095 if (k->type != HCI_LK_SMP_LTK)
1096 continue;
1097
1098 if (k->dlen != sizeof(*id))
1099 continue;
1100
1101 id = (void *) &k->data;
1102 if (id->ediv == ediv &&
1103 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1104 return k;
1105 }
1106
1107 return NULL;
1108}
1109EXPORT_SYMBOL(hci_find_ltk);
1110
1111struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1112 bdaddr_t *bdaddr, u8 type)
1113{
1114 struct link_key *k;
1115
1116 list_for_each_entry(k, &hdev->link_keys, list)
1117 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1118 return k;
1119
1120 return NULL;
1121}
1122EXPORT_SYMBOL(hci_find_link_key_type);
1123
d25e28ab
JH
/* Store (or update) the link key for @bdaddr. Works around buggy
 * controllers that report CHANGED_COMBINATION with no prior key, decides
 * persistence via hci_persistent_key(), and notifies mgmt for new keys.
 * Non-persistent keys are reported to mgmt and then dropped again.
 * Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
				old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps its previous type; otherwise record the
	 * (possibly fixed-up) reported type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* mgmt has been told; don't keep a non-persistent key around */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1178
75d262c2 1179int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1180 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1181{
1182 struct link_key *key, *old_key;
1183 struct key_master_id *id;
1184 u8 old_key_type;
1185
1186 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1187
1188 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1189 if (old_key) {
1190 key = old_key;
1191 old_key_type = old_key->type;
1192 } else {
1193 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1194 if (!key)
1195 return -ENOMEM;
1196 list_add(&key->list, &hdev->link_keys);
1197 old_key_type = 0xff;
1198 }
1199
1200 key->dlen = sizeof(*id);
1201
1202 bacpy(&key->bdaddr, bdaddr);
1203 memcpy(key->val, ltk, sizeof(key->val));
1204 key->type = HCI_LK_SMP_LTK;
726b4ffc 1205 key->pin_len = key_size;
75d262c2
VCG
1206
1207 id = (void *) &key->data;
1208 id->ediv = ediv;
1209 memcpy(id->rand, rand, sizeof(id->rand));
1210
1211 if (new_key)
744cf19e 1212 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1213
1214 return 0;
1215}
1216
55ed8ca1
JH
1217int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218{
1219 struct link_key *key;
1220
1221 key = hci_find_link_key(hdev, bdaddr);
1222 if (!key)
1223 return -ENOENT;
1224
1225 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227 list_del(&key->list);
1228 kfree(key);
1229
1230 return 0;
1231}
1232
6bd32326
VT
1233/* HCI command timer function */
1234static void hci_cmd_timer(unsigned long arg)
1235{
1236 struct hci_dev *hdev = (void *) arg;
1237
1238 BT_ERR("%s command tx timeout", hdev->name);
1239 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1240 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1241}
1242
2763eda6
SJ
1243struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1244 bdaddr_t *bdaddr)
1245{
1246 struct oob_data *data;
1247
1248 list_for_each_entry(data, &hdev->remote_oob_data, list)
1249 if (bacmp(bdaddr, &data->bdaddr) == 0)
1250 return data;
1251
1252 return NULL;
1253}
1254
1255int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1256{
1257 struct oob_data *data;
1258
1259 data = hci_find_remote_oob_data(hdev, bdaddr);
1260 if (!data)
1261 return -ENOENT;
1262
1263 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1264
1265 list_del(&data->list);
1266 kfree(data);
1267
1268 return 0;
1269}
1270
1271int hci_remote_oob_data_clear(struct hci_dev *hdev)
1272{
1273 struct oob_data *data, *n;
1274
1275 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1276 list_del(&data->list);
1277 kfree(data);
1278 }
1279
1280 return 0;
1281}
1282
1283int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1284 u8 *randomizer)
1285{
1286 struct oob_data *data;
1287
1288 data = hci_find_remote_oob_data(hdev, bdaddr);
1289
1290 if (!data) {
1291 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1292 if (!data)
1293 return -ENOMEM;
1294
1295 bacpy(&data->bdaddr, bdaddr);
1296 list_add(&data->list, &hdev->remote_oob_data);
1297 }
1298
1299 memcpy(data->hash, hash, sizeof(data->hash));
1300 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1301
1302 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1303
1304 return 0;
1305}
1306
b2a66aad
AJ
1307struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1308 bdaddr_t *bdaddr)
1309{
8035ded4 1310 struct bdaddr_list *b;
b2a66aad 1311
8035ded4 1312 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1313 if (bacmp(bdaddr, &b->bdaddr) == 0)
1314 return b;
b2a66aad
AJ
1315
1316 return NULL;
1317}
1318
1319int hci_blacklist_clear(struct hci_dev *hdev)
1320{
1321 struct list_head *p, *n;
1322
1323 list_for_each_safe(p, n, &hdev->blacklist) {
1324 struct bdaddr_list *b;
1325
1326 b = list_entry(p, struct bdaddr_list, list);
1327
1328 list_del(p);
1329 kfree(b);
1330 }
1331
1332 return 0;
1333}
1334
1335int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336{
1337 struct bdaddr_list *entry;
b2a66aad
AJ
1338
1339 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1340 return -EBADF;
1341
5e762444
AJ
1342 if (hci_blacklist_lookup(hdev, bdaddr))
1343 return -EEXIST;
b2a66aad
AJ
1344
1345 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1346 if (!entry)
1347 return -ENOMEM;
b2a66aad
AJ
1348
1349 bacpy(&entry->bdaddr, bdaddr);
1350
1351 list_add(&entry->list, &hdev->blacklist);
1352
744cf19e 1353 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1354}
1355
1356int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357{
1358 struct bdaddr_list *entry;
b2a66aad 1359
1ec918ce 1360 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1361 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1362
1363 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1364 if (!entry)
5e762444 1365 return -ENOENT;
b2a66aad
AJ
1366
1367 list_del(&entry->list);
1368 kfree(entry);
1369
744cf19e 1370 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1371}
1372
db323f2f 1373static void hci_clear_adv_cache(struct work_struct *work)
35815085 1374{
db323f2f
GP
1375 struct hci_dev *hdev = container_of(work, struct hci_dev,
1376 adv_work.work);
35815085
AG
1377
1378 hci_dev_lock(hdev);
1379
1380 hci_adv_entries_clear(hdev);
1381
1382 hci_dev_unlock(hdev);
1383}
1384
76c8686f
AG
1385int hci_adv_entries_clear(struct hci_dev *hdev)
1386{
1387 struct adv_entry *entry, *tmp;
1388
1389 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1390 list_del(&entry->list);
1391 kfree(entry);
1392 }
1393
1394 BT_DBG("%s adv cache cleared", hdev->name);
1395
1396 return 0;
1397}
1398
1399struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400{
1401 struct adv_entry *entry;
1402
1403 list_for_each_entry(entry, &hdev->adv_entries, list)
1404 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1405 return entry;
1406
1407 return NULL;
1408}
1409
1410static inline int is_connectable_adv(u8 evt_type)
1411{
1412 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1413 return 1;
1414
1415 return 0;
1416}
1417
1418int hci_add_adv_entry(struct hci_dev *hdev,
1419 struct hci_ev_le_advertising_info *ev)
1420{
1421 struct adv_entry *entry;
1422
1423 if (!is_connectable_adv(ev->evt_type))
1424 return -EINVAL;
1425
1426 /* Only new entries should be added to adv_entries. So, if
1427 * bdaddr was found, don't add it. */
1428 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1429 return 0;
1430
1431 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1432 if (!entry)
1433 return -ENOMEM;
1434
1435 bacpy(&entry->bdaddr, &ev->bdaddr);
1436 entry->bdaddr_type = ev->bdaddr_type;
1437
1438 list_add(&entry->list, &hdev->adv_entries);
1439
1440 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1441 batostr(&entry->bdaddr), entry->bdaddr_type);
1442
1443 return 0;
1444}
1445
1da177e4
LT
1446/* Register HCI device */
1447int hci_register_dev(struct hci_dev *hdev)
1448{
1449 struct list_head *head = &hci_dev_list, *p;
08add513 1450 int i, id, error;
1da177e4 1451
c13854ce
MH
1452 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1453 hdev->bus, hdev->owner);
1da177e4
LT
1454
1455 if (!hdev->open || !hdev->close || !hdev->destruct)
1456 return -EINVAL;
1457
08add513
MM
1458 /* Do not allow HCI_AMP devices to register at index 0,
1459 * so the index can be used as the AMP controller ID.
1460 */
1461 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1462
1da177e4
LT
1463 write_lock_bh(&hci_dev_list_lock);
1464
1465 /* Find first available device id */
1466 list_for_each(p, &hci_dev_list) {
1467 if (list_entry(p, struct hci_dev, list)->id != id)
1468 break;
1469 head = p; id++;
1470 }
8e87d142 1471
1da177e4
LT
1472 sprintf(hdev->name, "hci%d", id);
1473 hdev->id = id;
c6feeb28 1474 list_add_tail(&hdev->list, head);
1da177e4
LT
1475
1476 atomic_set(&hdev->refcnt, 1);
09fd0de5 1477 mutex_init(&hdev->lock);
1da177e4
LT
1478
1479 hdev->flags = 0;
d23264a8 1480 hdev->dev_flags = 0;
1da177e4 1481 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1482 hdev->esco_type = (ESCO_HV1);
1da177e4 1483 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1484 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1485
04837f64
MH
1486 hdev->idle_timeout = 0;
1487 hdev->sniff_max_interval = 800;
1488 hdev->sniff_min_interval = 80;
1489
b78752cc 1490 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1491 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1492 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1493
1da177e4
LT
1494
1495 skb_queue_head_init(&hdev->rx_q);
1496 skb_queue_head_init(&hdev->cmd_q);
1497 skb_queue_head_init(&hdev->raw_q);
1498
6bd32326
VT
1499 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1500
cd4c5391 1501 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1502 hdev->reassembly[i] = NULL;
1503
1da177e4 1504 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1505 mutex_init(&hdev->req_lock);
1da177e4
LT
1506
1507 inquiry_cache_init(hdev);
1508
1509 hci_conn_hash_init(hdev);
1510
2e58ef3e
JH
1511 INIT_LIST_HEAD(&hdev->mgmt_pending);
1512
ea4bd8ba 1513 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1514
2aeb9a1a
JH
1515 INIT_LIST_HEAD(&hdev->uuids);
1516
55ed8ca1
JH
1517 INIT_LIST_HEAD(&hdev->link_keys);
1518
2763eda6
SJ
1519 INIT_LIST_HEAD(&hdev->remote_oob_data);
1520
76c8686f
AG
1521 INIT_LIST_HEAD(&hdev->adv_entries);
1522
db323f2f 1523 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1524 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1525 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1526
16ab91ab
JH
1527 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1528
1da177e4
LT
1529 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1530
1531 atomic_set(&hdev->promisc, 0);
1532
1533 write_unlock_bh(&hci_dev_list_lock);
1534
32845eb1
GP
1535 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1536 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1537 if (!hdev->workqueue) {
1538 error = -ENOMEM;
1539 goto err;
1540 }
f48fd9c8 1541
33ca954d
DH
1542 error = hci_add_sysfs(hdev);
1543 if (error < 0)
1544 goto err_wqueue;
1da177e4 1545
611b30f7
MH
1546 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1547 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1548 if (hdev->rfkill) {
1549 if (rfkill_register(hdev->rfkill) < 0) {
1550 rfkill_destroy(hdev->rfkill);
1551 hdev->rfkill = NULL;
1552 }
1553 }
1554
ab81cbf9
JH
1555 set_bit(HCI_AUTO_OFF, &hdev->flags);
1556 set_bit(HCI_SETUP, &hdev->flags);
7f971041 1557 schedule_work(&hdev->power_on);
ab81cbf9 1558
1da177e4
LT
1559 hci_notify(hdev, HCI_DEV_REG);
1560
1561 return id;
f48fd9c8 1562
33ca954d
DH
1563err_wqueue:
1564 destroy_workqueue(hdev->workqueue);
1565err:
f48fd9c8
MH
1566 write_lock_bh(&hci_dev_list_lock);
1567 list_del(&hdev->list);
1568 write_unlock_bh(&hci_dev_list_lock);
1569
33ca954d 1570 return error;
1da177e4
LT
1571}
1572EXPORT_SYMBOL(hci_register_dev);
1573
1574/* Unregister HCI device */
59735631 1575void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1576{
ef222013
MH
1577 int i;
1578
c13854ce 1579 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1580
1da177e4
LT
1581 write_lock_bh(&hci_dev_list_lock);
1582 list_del(&hdev->list);
1583 write_unlock_bh(&hci_dev_list_lock);
1584
1585 hci_dev_do_close(hdev);
1586
cd4c5391 1587 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1588 kfree_skb(hdev->reassembly[i]);
1589
ab81cbf9 1590 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86 1591 !test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 1592 hci_dev_lock(hdev);
744cf19e 1593 mgmt_index_removed(hdev);
09fd0de5 1594 hci_dev_unlock(hdev);
56e5cb86 1595 }
ab81cbf9 1596
2e58ef3e
JH
1597 /* mgmt_index_removed should take care of emptying the
1598 * pending list */
1599 BUG_ON(!list_empty(&hdev->mgmt_pending));
1600
1da177e4
LT
1601 hci_notify(hdev, HCI_DEV_UNREG);
1602
611b30f7
MH
1603 if (hdev->rfkill) {
1604 rfkill_unregister(hdev->rfkill);
1605 rfkill_destroy(hdev->rfkill);
1606 }
1607
ce242970 1608 hci_del_sysfs(hdev);
147e2d59 1609
db323f2f 1610 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1611
f48fd9c8
MH
1612 destroy_workqueue(hdev->workqueue);
1613
09fd0de5 1614 hci_dev_lock(hdev);
e2e0cacb 1615 hci_blacklist_clear(hdev);
2aeb9a1a 1616 hci_uuids_clear(hdev);
55ed8ca1 1617 hci_link_keys_clear(hdev);
2763eda6 1618 hci_remote_oob_data_clear(hdev);
76c8686f 1619 hci_adv_entries_clear(hdev);
09fd0de5 1620 hci_dev_unlock(hdev);
e2e0cacb 1621
1da177e4 1622 __hci_dev_put(hdev);
1da177e4
LT
1623}
1624EXPORT_SYMBOL(hci_unregister_dev);
1625
1626/* Suspend HCI device */
1627int hci_suspend_dev(struct hci_dev *hdev)
1628{
1629 hci_notify(hdev, HCI_DEV_SUSPEND);
1630 return 0;
1631}
1632EXPORT_SYMBOL(hci_suspend_dev);
1633
1634/* Resume HCI device */
1635int hci_resume_dev(struct hci_dev *hdev)
1636{
1637 hci_notify(hdev, HCI_DEV_RESUME);
1638 return 0;
1639}
1640EXPORT_SYMBOL(hci_resume_dev);
1641
76bca880
MH
1642/* Receive frame from HCI drivers */
1643int hci_recv_frame(struct sk_buff *skb)
1644{
1645 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1646 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1647 && !test_bit(HCI_INIT, &hdev->flags))) {
1648 kfree_skb(skb);
1649 return -ENXIO;
1650 }
1651
1652 /* Incomming skb */
1653 bt_cb(skb)->incoming = 1;
1654
1655 /* Time stamp */
1656 __net_timestamp(skb);
1657
76bca880 1658 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1659 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1660
76bca880
MH
1661 return 0;
1662}
1663EXPORT_SYMBOL(hci_recv_frame);
1664
33e882a5 1665static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1666 int count, __u8 index)
33e882a5
SS
1667{
1668 int len = 0;
1669 int hlen = 0;
1670 int remain = count;
1671 struct sk_buff *skb;
1672 struct bt_skb_cb *scb;
1673
1674 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1675 index >= NUM_REASSEMBLY)
1676 return -EILSEQ;
1677
1678 skb = hdev->reassembly[index];
1679
1680 if (!skb) {
1681 switch (type) {
1682 case HCI_ACLDATA_PKT:
1683 len = HCI_MAX_FRAME_SIZE;
1684 hlen = HCI_ACL_HDR_SIZE;
1685 break;
1686 case HCI_EVENT_PKT:
1687 len = HCI_MAX_EVENT_SIZE;
1688 hlen = HCI_EVENT_HDR_SIZE;
1689 break;
1690 case HCI_SCODATA_PKT:
1691 len = HCI_MAX_SCO_SIZE;
1692 hlen = HCI_SCO_HDR_SIZE;
1693 break;
1694 }
1695
1e429f38 1696 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1697 if (!skb)
1698 return -ENOMEM;
1699
1700 scb = (void *) skb->cb;
1701 scb->expect = hlen;
1702 scb->pkt_type = type;
1703
1704 skb->dev = (void *) hdev;
1705 hdev->reassembly[index] = skb;
1706 }
1707
1708 while (count) {
1709 scb = (void *) skb->cb;
1710 len = min(scb->expect, (__u16)count);
1711
1712 memcpy(skb_put(skb, len), data, len);
1713
1714 count -= len;
1715 data += len;
1716 scb->expect -= len;
1717 remain = count;
1718
1719 switch (type) {
1720 case HCI_EVENT_PKT:
1721 if (skb->len == HCI_EVENT_HDR_SIZE) {
1722 struct hci_event_hdr *h = hci_event_hdr(skb);
1723 scb->expect = h->plen;
1724
1725 if (skb_tailroom(skb) < scb->expect) {
1726 kfree_skb(skb);
1727 hdev->reassembly[index] = NULL;
1728 return -ENOMEM;
1729 }
1730 }
1731 break;
1732
1733 case HCI_ACLDATA_PKT:
1734 if (skb->len == HCI_ACL_HDR_SIZE) {
1735 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1736 scb->expect = __le16_to_cpu(h->dlen);
1737
1738 if (skb_tailroom(skb) < scb->expect) {
1739 kfree_skb(skb);
1740 hdev->reassembly[index] = NULL;
1741 return -ENOMEM;
1742 }
1743 }
1744 break;
1745
1746 case HCI_SCODATA_PKT:
1747 if (skb->len == HCI_SCO_HDR_SIZE) {
1748 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1749 scb->expect = h->dlen;
1750
1751 if (skb_tailroom(skb) < scb->expect) {
1752 kfree_skb(skb);
1753 hdev->reassembly[index] = NULL;
1754 return -ENOMEM;
1755 }
1756 }
1757 break;
1758 }
1759
1760 if (scb->expect == 0) {
1761 /* Complete frame */
1762
1763 bt_cb(skb)->pkt_type = type;
1764 hci_recv_frame(skb);
1765
1766 hdev->reassembly[index] = NULL;
1767 return remain;
1768 }
1769 }
1770
1771 return remain;
1772}
1773
ef222013
MH
1774int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1775{
f39a3c06
SS
1776 int rem = 0;
1777
ef222013
MH
1778 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1779 return -EILSEQ;
1780
da5f6c37 1781 while (count) {
1e429f38 1782 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1783 if (rem < 0)
1784 return rem;
ef222013 1785
f39a3c06
SS
1786 data += (count - rem);
1787 count = rem;
f81c6224 1788 }
ef222013 1789
f39a3c06 1790 return rem;
ef222013
MH
1791}
1792EXPORT_SYMBOL(hci_recv_fragment);
1793
99811510
SS
1794#define STREAM_REASSEMBLY 0
1795
1796int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1797{
1798 int type;
1799 int rem = 0;
1800
da5f6c37 1801 while (count) {
99811510
SS
1802 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1803
1804 if (!skb) {
1805 struct { char type; } *pkt;
1806
1807 /* Start of the frame */
1808 pkt = data;
1809 type = pkt->type;
1810
1811 data++;
1812 count--;
1813 } else
1814 type = bt_cb(skb)->pkt_type;
1815
1e429f38
GP
1816 rem = hci_reassembly(hdev, type, data, count,
1817 STREAM_REASSEMBLY);
99811510
SS
1818 if (rem < 0)
1819 return rem;
1820
1821 data += (count - rem);
1822 count = rem;
f81c6224 1823 }
99811510
SS
1824
1825 return rem;
1826}
1827EXPORT_SYMBOL(hci_recv_stream_fragment);
1828
1da177e4
LT
1829/* ---- Interface to upper protocols ---- */
1830
1831/* Register/Unregister protocols.
1832 * hci_task_lock is used to ensure that no tasks are running. */
1833int hci_register_proto(struct hci_proto *hp)
1834{
1835 int err = 0;
1836
1837 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1838
1839 if (hp->id >= HCI_MAX_PROTO)
1840 return -EINVAL;
1841
67d0dfb5 1842 mutex_lock(&hci_task_lock);
1da177e4
LT
1843
1844 if (!hci_proto[hp->id])
1845 hci_proto[hp->id] = hp;
1846 else
1847 err = -EEXIST;
1848
67d0dfb5 1849 mutex_unlock(&hci_task_lock);
1da177e4
LT
1850
1851 return err;
1852}
1853EXPORT_SYMBOL(hci_register_proto);
1854
1855int hci_unregister_proto(struct hci_proto *hp)
1856{
1857 int err = 0;
1858
1859 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1860
1861 if (hp->id >= HCI_MAX_PROTO)
1862 return -EINVAL;
1863
67d0dfb5 1864 mutex_lock(&hci_task_lock);
1da177e4
LT
1865
1866 if (hci_proto[hp->id])
1867 hci_proto[hp->id] = NULL;
1868 else
1869 err = -ENOENT;
1870
67d0dfb5 1871 mutex_unlock(&hci_task_lock);
1da177e4
LT
1872
1873 return err;
1874}
1875EXPORT_SYMBOL(hci_unregister_proto);
1876
1877int hci_register_cb(struct hci_cb *cb)
1878{
1879 BT_DBG("%p name %s", cb, cb->name);
1880
1881 write_lock_bh(&hci_cb_list_lock);
1882 list_add(&cb->list, &hci_cb_list);
1883 write_unlock_bh(&hci_cb_list_lock);
1884
1885 return 0;
1886}
1887EXPORT_SYMBOL(hci_register_cb);
1888
1889int hci_unregister_cb(struct hci_cb *cb)
1890{
1891 BT_DBG("%p name %s", cb, cb->name);
1892
1893 write_lock_bh(&hci_cb_list_lock);
1894 list_del(&cb->list);
1895 write_unlock_bh(&hci_cb_list_lock);
1896
1897 return 0;
1898}
1899EXPORT_SYMBOL(hci_unregister_cb);
1900
1901static int hci_send_frame(struct sk_buff *skb)
1902{
1903 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1904
1905 if (!hdev) {
1906 kfree_skb(skb);
1907 return -ENODEV;
1908 }
1909
0d48d939 1910 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1911
1912 if (atomic_read(&hdev->promisc)) {
1913 /* Time stamp */
a61bbcf2 1914 __net_timestamp(skb);
1da177e4 1915
eec8d2bc 1916 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1917 }
1918
1919 /* Get rid of skb owner, prior to sending to the driver. */
1920 skb_orphan(skb);
1921
1922 return hdev->send(skb);
1923}
1924
1925/* Send HCI command */
a9de9248 1926int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1927{
1928 int len = HCI_COMMAND_HDR_SIZE + plen;
1929 struct hci_command_hdr *hdr;
1930 struct sk_buff *skb;
1931
a9de9248 1932 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1933
1934 skb = bt_skb_alloc(len, GFP_ATOMIC);
1935 if (!skb) {
ef222013 1936 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1937 return -ENOMEM;
1938 }
1939
1940 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1941 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1942 hdr->plen = plen;
1943
1944 if (plen)
1945 memcpy(skb_put(skb, plen), param, plen);
1946
1947 BT_DBG("skb len %d", skb->len);
1948
0d48d939 1949 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1950 skb->dev = (void *) hdev;
c78ae283 1951
a5040efa
JH
1952 if (test_bit(HCI_INIT, &hdev->flags))
1953 hdev->init_last_cmd = opcode;
1954
1da177e4 1955 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1956 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1957
1958 return 0;
1959}
1da177e4
LT
1960
1961/* Get data from the previously sent command */
a9de9248 1962void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1963{
1964 struct hci_command_hdr *hdr;
1965
1966 if (!hdev->sent_cmd)
1967 return NULL;
1968
1969 hdr = (void *) hdev->sent_cmd->data;
1970
a9de9248 1971 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1972 return NULL;
1973
a9de9248 1974 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1975
1976 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1977}
1978
1979/* Send ACL data */
1980static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1981{
1982 struct hci_acl_hdr *hdr;
1983 int len = skb->len;
1984
badff6d0
ACM
1985 skb_push(skb, HCI_ACL_HDR_SIZE);
1986 skb_reset_transport_header(skb);
9c70220b 1987 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1988 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1989 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1990}
1991
73d80deb
LAD
1992static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1993 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1994{
1995 struct hci_dev *hdev = conn->hdev;
1996 struct sk_buff *list;
1997
70f23020
AE
1998 list = skb_shinfo(skb)->frag_list;
1999 if (!list) {
1da177e4
LT
2000 /* Non fragmented */
2001 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2002
73d80deb 2003 skb_queue_tail(queue, skb);
1da177e4
LT
2004 } else {
2005 /* Fragmented */
2006 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2007
2008 skb_shinfo(skb)->frag_list = NULL;
2009
2010 /* Queue all fragments atomically */
73d80deb 2011 spin_lock_bh(&queue->lock);
1da177e4 2012
73d80deb 2013 __skb_queue_tail(queue, skb);
e702112f
AE
2014
2015 flags &= ~ACL_START;
2016 flags |= ACL_CONT;
1da177e4
LT
2017 do {
2018 skb = list; list = list->next;
8e87d142 2019
1da177e4 2020 skb->dev = (void *) hdev;
0d48d939 2021 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2022 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2023
2024 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2025
73d80deb 2026 __skb_queue_tail(queue, skb);
1da177e4
LT
2027 } while (list);
2028
73d80deb 2029 spin_unlock_bh(&queue->lock);
1da177e4 2030 }
73d80deb
LAD
2031}
2032
2033void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2034{
2035 struct hci_conn *conn = chan->conn;
2036 struct hci_dev *hdev = conn->hdev;
2037
2038 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2039
2040 skb->dev = (void *) hdev;
2041 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2042 hci_add_acl_hdr(skb, conn->handle, flags);
2043
2044 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2045
3eff45ea 2046 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2047}
2048EXPORT_SYMBOL(hci_send_acl);
2049
2050/* Send SCO data */
0d861d8b 2051void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2052{
2053 struct hci_dev *hdev = conn->hdev;
2054 struct hci_sco_hdr hdr;
2055
2056 BT_DBG("%s len %d", hdev->name, skb->len);
2057
aca3192c 2058 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2059 hdr.dlen = skb->len;
2060
badff6d0
ACM
2061 skb_push(skb, HCI_SCO_HDR_SIZE);
2062 skb_reset_transport_header(skb);
9c70220b 2063 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2064
2065 skb->dev = (void *) hdev;
0d48d939 2066 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2067
1da177e4 2068 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2069 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2070}
2071EXPORT_SYMBOL(hci_send_sco);
2072
2073/* ---- HCI TX task (outgoing data) ---- */
2074
2075/* HCI Connection scheduler */
2076static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2077{
2078 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2079 struct hci_conn *conn = NULL, *c;
1da177e4 2080 int num = 0, min = ~0;
1da177e4 2081
8e87d142 2082 /* We don't have to lock device here. Connections are always
1da177e4 2083 * added and removed with TX task disabled. */
bf4c6325
GP
2084
2085 rcu_read_lock();
2086
2087 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2088 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2089 continue;
769be974
MH
2090
2091 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2092 continue;
2093
1da177e4
LT
2094 num++;
2095
2096 if (c->sent < min) {
2097 min = c->sent;
2098 conn = c;
2099 }
52087a79
LAD
2100
2101 if (hci_conn_num(hdev, type) == num)
2102 break;
1da177e4
LT
2103 }
2104
bf4c6325
GP
2105 rcu_read_unlock();
2106
1da177e4 2107 if (conn) {
6ed58ec5
VT
2108 int cnt, q;
2109
2110 switch (conn->type) {
2111 case ACL_LINK:
2112 cnt = hdev->acl_cnt;
2113 break;
2114 case SCO_LINK:
2115 case ESCO_LINK:
2116 cnt = hdev->sco_cnt;
2117 break;
2118 case LE_LINK:
2119 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2120 break;
2121 default:
2122 cnt = 0;
2123 BT_ERR("Unknown link type");
2124 }
2125
2126 q = cnt / num;
1da177e4
LT
2127 *quote = q ? q : 1;
2128 } else
2129 *quote = 0;
2130
2131 BT_DBG("conn %p quote %d", conn, *quote);
2132 return conn;
2133}
2134
bae1f5d9 2135static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2136{
2137 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2138 struct hci_conn *c;
1da177e4 2139
bae1f5d9 2140 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2141
bf4c6325
GP
2142 rcu_read_lock();
2143
1da177e4 2144 /* Kill stalled connections */
bf4c6325 2145 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2146 if (c->type == type && c->sent) {
2147 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2148 hdev->name, batostr(&c->dst));
2149 hci_acl_disconn(c, 0x13);
2150 }
2151 }
bf4c6325
GP
2152
2153 rcu_read_unlock();
1da177e4
LT
2154}
2155
73d80deb
LAD
2156static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2157 int *quote)
1da177e4 2158{
73d80deb
LAD
2159 struct hci_conn_hash *h = &hdev->conn_hash;
2160 struct hci_chan *chan = NULL;
2161 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2162 struct hci_conn *conn;
73d80deb
LAD
2163 int cnt, q, conn_num = 0;
2164
2165 BT_DBG("%s", hdev->name);
2166
bf4c6325
GP
2167 rcu_read_lock();
2168
2169 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2170 struct hci_chan *tmp;
2171
2172 if (conn->type != type)
2173 continue;
2174
2175 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2176 continue;
2177
2178 conn_num++;
2179
8192edef 2180 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2181 struct sk_buff *skb;
2182
2183 if (skb_queue_empty(&tmp->data_q))
2184 continue;
2185
2186 skb = skb_peek(&tmp->data_q);
2187 if (skb->priority < cur_prio)
2188 continue;
2189
2190 if (skb->priority > cur_prio) {
2191 num = 0;
2192 min = ~0;
2193 cur_prio = skb->priority;
2194 }
2195
2196 num++;
2197
2198 if (conn->sent < min) {
2199 min = conn->sent;
2200 chan = tmp;
2201 }
2202 }
2203
2204 if (hci_conn_num(hdev, type) == conn_num)
2205 break;
2206 }
2207
bf4c6325
GP
2208 rcu_read_unlock();
2209
73d80deb
LAD
2210 if (!chan)
2211 return NULL;
2212
2213 switch (chan->conn->type) {
2214 case ACL_LINK:
2215 cnt = hdev->acl_cnt;
2216 break;
2217 case SCO_LINK:
2218 case ESCO_LINK:
2219 cnt = hdev->sco_cnt;
2220 break;
2221 case LE_LINK:
2222 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2223 break;
2224 default:
2225 cnt = 0;
2226 BT_ERR("Unknown link type");
2227 }
2228
2229 q = cnt / num;
2230 *quote = q ? q : 1;
2231 BT_DBG("chan %p quote %d", chan, *quote);
2232 return chan;
2233}
2234
02b20f0b
LAD
2235static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2236{
2237 struct hci_conn_hash *h = &hdev->conn_hash;
2238 struct hci_conn *conn;
2239 int num = 0;
2240
2241 BT_DBG("%s", hdev->name);
2242
bf4c6325
GP
2243 rcu_read_lock();
2244
2245 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2246 struct hci_chan *chan;
2247
2248 if (conn->type != type)
2249 continue;
2250
2251 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2252 continue;
2253
2254 num++;
2255
8192edef 2256 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2257 struct sk_buff *skb;
2258
2259 if (chan->sent) {
2260 chan->sent = 0;
2261 continue;
2262 }
2263
2264 if (skb_queue_empty(&chan->data_q))
2265 continue;
2266
2267 skb = skb_peek(&chan->data_q);
2268 if (skb->priority >= HCI_PRIO_MAX - 1)
2269 continue;
2270
2271 skb->priority = HCI_PRIO_MAX - 1;
2272
2273 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2274 skb->priority);
2275 }
2276
2277 if (hci_conn_num(hdev, type) == num)
2278 break;
2279 }
bf4c6325
GP
2280
2281 rcu_read_unlock();
2282
02b20f0b
LAD
2283}
2284
73d80deb
LAD
2285static inline void hci_sched_acl(struct hci_dev *hdev)
2286{
2287 struct hci_chan *chan;
1da177e4
LT
2288 struct sk_buff *skb;
2289 int quote;
73d80deb 2290 unsigned int cnt;
1da177e4
LT
2291
2292 BT_DBG("%s", hdev->name);
2293
52087a79
LAD
2294 if (!hci_conn_num(hdev, ACL_LINK))
2295 return;
2296
1da177e4
LT
2297 if (!test_bit(HCI_RAW, &hdev->flags)) {
2298 /* ACL tx timeout must be longer than maximum
2299 * link supervision timeout (40.9 seconds) */
82453021 2300 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2301 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2302 }
2303
73d80deb 2304 cnt = hdev->acl_cnt;
04837f64 2305
73d80deb
LAD
2306 while (hdev->acl_cnt &&
2307 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2308 u32 priority = (skb_peek(&chan->data_q))->priority;
2309 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2310 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2311 skb->len, skb->priority);
2312
ec1cce24
LAD
2313 /* Stop if priority has changed */
2314 if (skb->priority < priority)
2315 break;
2316
2317 skb = skb_dequeue(&chan->data_q);
2318
73d80deb
LAD
2319 hci_conn_enter_active_mode(chan->conn,
2320 bt_cb(skb)->force_active);
04837f64 2321
1da177e4
LT
2322 hci_send_frame(skb);
2323 hdev->acl_last_tx = jiffies;
2324
2325 hdev->acl_cnt--;
73d80deb
LAD
2326 chan->sent++;
2327 chan->conn->sent++;
1da177e4
LT
2328 }
2329 }
02b20f0b
LAD
2330
2331 if (cnt != hdev->acl_cnt)
2332 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2333}
2334
2335/* Schedule SCO */
2336static inline void hci_sched_sco(struct hci_dev *hdev)
2337{
2338 struct hci_conn *conn;
2339 struct sk_buff *skb;
2340 int quote;
2341
2342 BT_DBG("%s", hdev->name);
2343
52087a79
LAD
2344 if (!hci_conn_num(hdev, SCO_LINK))
2345 return;
2346
1da177e4
LT
2347 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2348 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2349 BT_DBG("skb %p len %d", skb, skb->len);
2350 hci_send_frame(skb);
2351
2352 conn->sent++;
2353 if (conn->sent == ~0)
2354 conn->sent = 0;
2355 }
2356 }
2357}
2358
b6a0dc82
MH
2359static inline void hci_sched_esco(struct hci_dev *hdev)
2360{
2361 struct hci_conn *conn;
2362 struct sk_buff *skb;
2363 int quote;
2364
2365 BT_DBG("%s", hdev->name);
2366
52087a79
LAD
2367 if (!hci_conn_num(hdev, ESCO_LINK))
2368 return;
2369
b6a0dc82
MH
2370 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2371 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2372 BT_DBG("skb %p len %d", skb, skb->len);
2373 hci_send_frame(skb);
2374
2375 conn->sent++;
2376 if (conn->sent == ~0)
2377 conn->sent = 0;
2378 }
2379 }
2380}
2381
/* Schedule LE traffic.
 *
 * When the controller reports dedicated LE buffers (le_pkts != 0) the LE
 * credit pool (le_cnt) is used; otherwise LE frames share the ACL credit
 * pool (acl_cnt) — see the cnt selection and write-back below.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers if present, else ACL. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Snapshot the head priority; only drain frames of equal or
		 * higher priority in this pass. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek succeeded above, so this dequeue returns the
			 * same skb. */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2432
3eff45ea 2433static void hci_tx_work(struct work_struct *work)
1da177e4 2434{
3eff45ea 2435 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2436 struct sk_buff *skb;
2437
67d0dfb5 2438 mutex_lock(&hci_task_lock);
1da177e4 2439
6ed58ec5
VT
2440 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2441 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2442
2443 /* Schedule queues and send stuff to HCI driver */
2444
2445 hci_sched_acl(hdev);
2446
2447 hci_sched_sco(hdev);
2448
b6a0dc82
MH
2449 hci_sched_esco(hdev);
2450
6ed58ec5
VT
2451 hci_sched_le(hdev);
2452
1da177e4
LT
2453 /* Send next queued raw (unknown type) packet */
2454 while ((skb = skb_dequeue(&hdev->raw_q)))
2455 hci_send_frame(skb);
2456
67d0dfb5 2457 mutex_unlock(&hci_task_lock);
1da177e4
LT
2458}
2459
25985edc 2460/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2461
2462/* ACL data packet */
2463static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2464{
2465 struct hci_acl_hdr *hdr = (void *) skb->data;
2466 struct hci_conn *conn;
2467 __u16 handle, flags;
2468
2469 skb_pull(skb, HCI_ACL_HDR_SIZE);
2470
2471 handle = __le16_to_cpu(hdr->handle);
2472 flags = hci_flags(handle);
2473 handle = hci_handle(handle);
2474
2475 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2476
2477 hdev->stat.acl_rx++;
2478
2479 hci_dev_lock(hdev);
2480 conn = hci_conn_hash_lookup_handle(hdev, handle);
2481 hci_dev_unlock(hdev);
8e87d142 2482
1da177e4
LT
2483 if (conn) {
2484 register struct hci_proto *hp;
2485
65983fc7 2486 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2487
1da177e4 2488 /* Send to upper protocol */
70f23020
AE
2489 hp = hci_proto[HCI_PROTO_L2CAP];
2490 if (hp && hp->recv_acldata) {
1da177e4
LT
2491 hp->recv_acldata(conn, skb, flags);
2492 return;
2493 }
2494 } else {
8e87d142 2495 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2496 hdev->name, handle);
2497 }
2498
2499 kfree_skb(skb);
2500}
2501
2502/* SCO data packet */
2503static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2504{
2505 struct hci_sco_hdr *hdr = (void *) skb->data;
2506 struct hci_conn *conn;
2507 __u16 handle;
2508
2509 skb_pull(skb, HCI_SCO_HDR_SIZE);
2510
2511 handle = __le16_to_cpu(hdr->handle);
2512
2513 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2514
2515 hdev->stat.sco_rx++;
2516
2517 hci_dev_lock(hdev);
2518 conn = hci_conn_hash_lookup_handle(hdev, handle);
2519 hci_dev_unlock(hdev);
2520
2521 if (conn) {
2522 register struct hci_proto *hp;
2523
2524 /* Send to upper protocol */
70f23020
AE
2525 hp = hci_proto[HCI_PROTO_SCO];
2526 if (hp && hp->recv_scodata) {
1da177e4
LT
2527 hp->recv_scodata(conn, skb);
2528 return;
2529 }
2530 } else {
8e87d142 2531 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2532 hdev->name, handle);
2533 }
2534
2535 kfree_skb(skb);
2536}
2537
/* RX work: drain hdev->rx_q and dispatch each frame by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	mutex_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Mirror traffic to monitoring sockets when someone is
		 * listening in promiscuous mode. */
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack does not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop. */
			kfree_skb(skb);
			break;
		}
	}

	mutex_unlock(&hci_task_lock);
}
2593
/* Command work: transmit the next queued HCI command when the controller
 * has a free command credit (cmd_cnt). */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before storing the new
		 * one (freeing a NULL sent_cmd is a no-op). */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the outgoing command; presumably used by
		 * the event path to match replies — confirm in hci_event.c. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI_RESET no command timeout is armed;
			 * otherwise (re)arm the watchdog timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry
			 * later by requeueing this work item. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2624
2625int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2626{
2627 /* General inquiry access code (GIAC) */
2628 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2629 struct hci_cp_inquiry cp;
2630
2631 BT_DBG("%s", hdev->name);
2632
2633 if (test_bit(HCI_INQUIRY, &hdev->flags))
2634 return -EINPROGRESS;
2635
2636 memset(&cp, 0, sizeof(cp));
2637 memcpy(&cp.lap, lap, sizeof(cp.lap));
2638 cp.length = length;
2639
2640 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2641}
023d5049
AG
2642
2643int hci_cancel_inquiry(struct hci_dev *hdev)
2644{
2645 BT_DBG("%s", hdev->name);
2646
2647 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2648 return -EPERM;
2649
2650 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2651}
7784d78f
AE
2652
/* Expose 'enable_hs' (defined elsewhere in this file) as a writable
 * module parameter (mode 0644) toggling High Speed (AMP) support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");