Bluetooth: mgmt: Fix device_found parameters
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
b78752cc 57static void hci_rx_work(struct work_struct *work);
c347b765 58static void hci_cmd_work(struct work_struct *work);
3eff45ea 59static void hci_tx_work(struct work_struct *work);
1da177e4 60
1da177e4
LT
61/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
1da177e4
LT
69/* ---- HCI notifications ---- */
70
/* Propagate a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the
 * HCI socket layer so listeners on HCI sockets get notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
23bb5763 78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 79{
23bb5763
JH
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
a5040efa
JH
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 86 return;
1da177e4
LT
87
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106/* Execute request and wait for completion. */
8e87d142 107static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 108 unsigned long opt, __u32 timeout)
1da177e4
LT
109{
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
e175072f 130 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
3ff50b79 140 }
1da177e4 141
a5040efa 142 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 150 unsigned long opt, __u32 timeout)
1da177e4
LT
151{
152 int ret;
153
7c6a329e
MH
154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
1da177e4
LT
157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
165static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166{
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
f630cf0d 170 set_bit(HCI_RESET, &hdev->flags);
a9de9248 171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
172}
173
e61ef499 174static void bredr_init(struct hci_dev *hdev)
1da177e4 175{
b0916ea0 176 struct hci_cp_delete_stored_link_key cp;
1ebb9252 177 __le16 param;
89f2783d 178 __u8 flt_type;
1da177e4 179
2455a3ea
AE
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
1da177e4
LT
182 /* Mandatory initialization */
183
184 /* Reset */
f630cf0d 185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 188 }
1da177e4
LT
189
190 /* Read Local Supported Features */
a9de9248 191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 192
1143e5a6 193 /* Read Local Version */
a9de9248 194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 195
1da177e4 196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 198
1da177e4 199 /* Read BD Address */
a9de9248
MH
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
207
208 /* Read Voice Setting */
a9de9248 209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
89f2783d 214 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 216
1da177e4 217 /* Connection accept timeout ~20 secs */
aca3192c 218 param = cpu_to_le16(0x7d00);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
224}
225
e61ef499
AE
226static void amp_init(struct hci_dev *hdev)
227{
2455a3ea
AE
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
e61ef499
AE
230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235}
236
237static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238{
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269}
270
6ed58ec5
VT
271static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272{
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277}
278
1da177e4
LT
279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
a9de9248 286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
e4e8e37c 305 /* Encryption */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
307}
308
e4e8e37c
MH
309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
a418b893 313 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
8e87d142 319/* Get HCI device by index.
1da177e4
LT
320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
8035ded4 323 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
8035ded4 331 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
1da177e4
LT
340
341/* ---- Inquiry support ---- */
ff9ef578 342
30dc78e1
JH
343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
6fbe195d 347 switch (discov->state) {
343f935b 348 case DISCOVERY_FINDING:
6fbe195d 349 case DISCOVERY_RESOLVING:
30dc78e1
JH
350 return true;
351
6fbe195d
AG
352 default:
353 return false;
354 }
30dc78e1
JH
355}
356
ff9ef578
JH
357void hci_discovery_set_state(struct hci_dev *hdev, int state)
358{
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
7b99b659
AG
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
f963e8e9 368 hdev->discovery.type = 0;
ff9ef578
JH
369 break;
370 case DISCOVERY_STARTING:
371 break;
343f935b 372 case DISCOVERY_FINDING:
ff9ef578
JH
373 mgmt_discovering(hdev, 1);
374 break;
30dc78e1
JH
375 case DISCOVERY_RESOLVING:
376 break;
ff9ef578
JH
377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382}
383
1da177e4
LT
384static void inquiry_cache_flush(struct hci_dev *hdev)
385{
30883512 386 struct discovery_state *cache = &hdev->discovery;
b57c1a56 387 struct inquiry_entry *p, *n;
1da177e4 388
561aafbc
JH
389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
b57c1a56 391 kfree(p);
1da177e4 392 }
561aafbc
JH
393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 396 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400{
30883512 401 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
561aafbc
JH
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
408 return e;
409 }
410
411 return NULL;
412}
413
414struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416{
30883512 417 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 423 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
424 return e;
425 }
426
427 return NULL;
1da177e4
LT
428}
429
30dc78e1
JH
430struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433{
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
a3d4e20a
JH
449void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466}
467
3175405b 468bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
561aafbc 469 bool name_known)
1da177e4 470{
30883512 471 struct discovery_state *cache = &hdev->discovery;
70f23020 472 struct inquiry_entry *ie;
1da177e4
LT
473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
70f23020 476 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a
JH
477 if (ie) {
478 if (ie->name_state == NAME_NEEDED &&
479 data->rssi != ie->data.rssi) {
480 ie->data.rssi = data->rssi;
481 hci_inquiry_cache_update_resolve(hdev, ie);
482 }
483
561aafbc 484 goto update;
a3d4e20a 485 }
561aafbc
JH
486
487 /* Entry not in the cache. Add new one. */
488 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
489 if (!ie)
3175405b 490 return false;
561aafbc
JH
491
492 list_add(&ie->all, &cache->all);
493
494 if (name_known) {
495 ie->name_state = NAME_KNOWN;
496 } else {
497 ie->name_state = NAME_NOT_KNOWN;
498 list_add(&ie->list, &cache->unknown);
499 }
70f23020 500
561aafbc
JH
501update:
502 if (name_known && ie->name_state != NAME_KNOWN &&
503 ie->name_state != NAME_PENDING) {
504 ie->name_state = NAME_KNOWN;
505 list_del(&ie->list);
1da177e4
LT
506 }
507
70f23020
AE
508 memcpy(&ie->data, data, sizeof(*data));
509 ie->timestamp = jiffies;
1da177e4 510 cache->timestamp = jiffies;
3175405b
JH
511
512 if (ie->name_state == NAME_NOT_KNOWN)
513 return false;
514
515 return true;
1da177e4
LT
516}
517
518static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
519{
30883512 520 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
521 struct inquiry_info *info = (struct inquiry_info *) buf;
522 struct inquiry_entry *e;
523 int copied = 0;
524
561aafbc 525 list_for_each_entry(e, &cache->all, all) {
1da177e4 526 struct inquiry_data *data = &e->data;
b57c1a56
JH
527
528 if (copied >= num)
529 break;
530
1da177e4
LT
531 bacpy(&info->bdaddr, &data->bdaddr);
532 info->pscan_rep_mode = data->pscan_rep_mode;
533 info->pscan_period_mode = data->pscan_period_mode;
534 info->pscan_mode = data->pscan_mode;
535 memcpy(info->dev_class, data->dev_class, 3);
536 info->clock_offset = data->clock_offset;
b57c1a56 537
1da177e4 538 info++;
b57c1a56 539 copied++;
1da177e4
LT
540 }
541
542 BT_DBG("cache %p, copied %d", cache, copied);
543 return copied;
544}
545
546static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
547{
548 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
549 struct hci_cp_inquiry cp;
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_INQUIRY, &hdev->flags))
554 return;
555
556 /* Start Inquiry */
557 memcpy(&cp.lap, &ir->lap, 3);
558 cp.length = ir->length;
559 cp.num_rsp = ir->num_rsp;
a9de9248 560 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
561}
562
563int hci_inquiry(void __user *arg)
564{
565 __u8 __user *ptr = arg;
566 struct hci_inquiry_req ir;
567 struct hci_dev *hdev;
568 int err = 0, do_inquiry = 0, max_rsp;
569 long timeo;
570 __u8 *buf;
571
572 if (copy_from_user(&ir, ptr, sizeof(ir)))
573 return -EFAULT;
574
5a08ecce
AE
575 hdev = hci_dev_get(ir.dev_id);
576 if (!hdev)
1da177e4
LT
577 return -ENODEV;
578
09fd0de5 579 hci_dev_lock(hdev);
8e87d142 580 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
581 inquiry_cache_empty(hdev) ||
582 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
583 inquiry_cache_flush(hdev);
584 do_inquiry = 1;
585 }
09fd0de5 586 hci_dev_unlock(hdev);
1da177e4 587
04837f64 588 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
589
590 if (do_inquiry) {
591 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
592 if (err < 0)
593 goto done;
594 }
1da177e4
LT
595
596 /* for unlimited number of responses we will use buffer with 255 entries */
597 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
598
599 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
600 * copy it to the user space.
601 */
01df8c31 602 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 603 if (!buf) {
1da177e4
LT
604 err = -ENOMEM;
605 goto done;
606 }
607
09fd0de5 608 hci_dev_lock(hdev);
1da177e4 609 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 610 hci_dev_unlock(hdev);
1da177e4
LT
611
612 BT_DBG("num_rsp %d", ir.num_rsp);
613
614 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
615 ptr += sizeof(ir);
616 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
617 ir.num_rsp))
618 err = -EFAULT;
8e87d142 619 } else
1da177e4
LT
620 err = -EFAULT;
621
622 kfree(buf);
623
624done:
625 hci_dev_put(hdev);
626 return err;
627}
628
629/* ---- HCI ioctl helpers ---- */
630
631int hci_dev_open(__u16 dev)
632{
633 struct hci_dev *hdev;
634 int ret = 0;
635
5a08ecce
AE
636 hdev = hci_dev_get(dev);
637 if (!hdev)
1da177e4
LT
638 return -ENODEV;
639
640 BT_DBG("%s %p", hdev->name, hdev);
641
642 hci_req_lock(hdev);
643
611b30f7
MH
644 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
645 ret = -ERFKILL;
646 goto done;
647 }
648
1da177e4
LT
649 if (test_bit(HCI_UP, &hdev->flags)) {
650 ret = -EALREADY;
651 goto done;
652 }
653
654 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
655 set_bit(HCI_RAW, &hdev->flags);
656
07e3b94a
AE
657 /* Treat all non BR/EDR controllers as raw devices if
658 enable_hs is not set */
659 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
660 set_bit(HCI_RAW, &hdev->flags);
661
1da177e4
LT
662 if (hdev->open(hdev)) {
663 ret = -EIO;
664 goto done;
665 }
666
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 atomic_set(&hdev->cmd_cnt, 1);
669 set_bit(HCI_INIT, &hdev->flags);
a5040efa 670 hdev->init_last_cmd = 0;
1da177e4 671
04837f64
MH
672 ret = __hci_request(hdev, hci_init_req, 0,
673 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 674
eead27da 675 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
676 ret = __hci_request(hdev, hci_le_init_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
1da177e4
LT
679 clear_bit(HCI_INIT, &hdev->flags);
680 }
681
682 if (!ret) {
683 hci_dev_hold(hdev);
684 set_bit(HCI_UP, &hdev->flags);
685 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 686 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 687 hci_dev_lock(hdev);
744cf19e 688 mgmt_powered(hdev, 1);
09fd0de5 689 hci_dev_unlock(hdev);
56e5cb86 690 }
8e87d142 691 } else {
1da177e4 692 /* Init failed, cleanup */
3eff45ea 693 flush_work(&hdev->tx_work);
c347b765 694 flush_work(&hdev->cmd_work);
b78752cc 695 flush_work(&hdev->rx_work);
1da177e4
LT
696
697 skb_queue_purge(&hdev->cmd_q);
698 skb_queue_purge(&hdev->rx_q);
699
700 if (hdev->flush)
701 hdev->flush(hdev);
702
703 if (hdev->sent_cmd) {
704 kfree_skb(hdev->sent_cmd);
705 hdev->sent_cmd = NULL;
706 }
707
708 hdev->close(hdev);
709 hdev->flags = 0;
710 }
711
712done:
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716}
717
718static int hci_dev_do_close(struct hci_dev *hdev)
719{
720 BT_DBG("%s %p", hdev->name, hdev);
721
28b75a89
AG
722 cancel_work_sync(&hdev->le_scan);
723
1da177e4
LT
724 hci_req_cancel(hdev, ENODEV);
725 hci_req_lock(hdev);
726
727 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 728 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
729 hci_req_unlock(hdev);
730 return 0;
731 }
732
3eff45ea
GP
733 /* Flush RX and TX works */
734 flush_work(&hdev->tx_work);
b78752cc 735 flush_work(&hdev->rx_work);
1da177e4 736
16ab91ab 737 if (hdev->discov_timeout > 0) {
e0f9309f 738 cancel_delayed_work(&hdev->discov_off);
16ab91ab 739 hdev->discov_timeout = 0;
5e5282bb 740 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
741 }
742
a8b2d5c2 743 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
744 cancel_delayed_work(&hdev->service_cache);
745
7ba8b4be
AG
746 cancel_delayed_work_sync(&hdev->le_scan_disable);
747
09fd0de5 748 hci_dev_lock(hdev);
1da177e4
LT
749 inquiry_cache_flush(hdev);
750 hci_conn_hash_flush(hdev);
09fd0de5 751 hci_dev_unlock(hdev);
1da177e4
LT
752
753 hci_notify(hdev, HCI_DEV_DOWN);
754
755 if (hdev->flush)
756 hdev->flush(hdev);
757
758 /* Reset device */
759 skb_queue_purge(&hdev->cmd_q);
760 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
761 if (!test_bit(HCI_RAW, &hdev->flags) &&
762 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 763 set_bit(HCI_INIT, &hdev->flags);
04837f64 764 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 765 msecs_to_jiffies(250));
1da177e4
LT
766 clear_bit(HCI_INIT, &hdev->flags);
767 }
768
c347b765
GP
769 /* flush cmd work */
770 flush_work(&hdev->cmd_work);
1da177e4
LT
771
772 /* Drop queues */
773 skb_queue_purge(&hdev->rx_q);
774 skb_queue_purge(&hdev->cmd_q);
775 skb_queue_purge(&hdev->raw_q);
776
777 /* Drop last sent command */
778 if (hdev->sent_cmd) {
b79f44c1 779 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
780 kfree_skb(hdev->sent_cmd);
781 hdev->sent_cmd = NULL;
782 }
783
784 /* After this point our queues are empty
785 * and no tasks are scheduled. */
786 hdev->close(hdev);
787
8ee56540
MH
788 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
789 hci_dev_lock(hdev);
790 mgmt_powered(hdev, 0);
791 hci_dev_unlock(hdev);
792 }
5add6af8 793
1da177e4
LT
794 /* Clear flags */
795 hdev->flags = 0;
796
e59fda8d 797 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 798 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 799
1da177e4
LT
800 hci_req_unlock(hdev);
801
802 hci_dev_put(hdev);
803 return 0;
804}
805
806int hci_dev_close(__u16 dev)
807{
808 struct hci_dev *hdev;
809 int err;
810
70f23020
AE
811 hdev = hci_dev_get(dev);
812 if (!hdev)
1da177e4 813 return -ENODEV;
8ee56540
MH
814
815 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
816 cancel_delayed_work(&hdev->power_off);
817
1da177e4 818 err = hci_dev_do_close(hdev);
8ee56540 819
1da177e4
LT
820 hci_dev_put(hdev);
821 return err;
822}
823
824int hci_dev_reset(__u16 dev)
825{
826 struct hci_dev *hdev;
827 int ret = 0;
828
70f23020
AE
829 hdev = hci_dev_get(dev);
830 if (!hdev)
1da177e4
LT
831 return -ENODEV;
832
833 hci_req_lock(hdev);
1da177e4
LT
834
835 if (!test_bit(HCI_UP, &hdev->flags))
836 goto done;
837
838 /* Drop queues */
839 skb_queue_purge(&hdev->rx_q);
840 skb_queue_purge(&hdev->cmd_q);
841
09fd0de5 842 hci_dev_lock(hdev);
1da177e4
LT
843 inquiry_cache_flush(hdev);
844 hci_conn_hash_flush(hdev);
09fd0de5 845 hci_dev_unlock(hdev);
1da177e4
LT
846
847 if (hdev->flush)
848 hdev->flush(hdev);
849
8e87d142 850 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 851 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
852
853 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
854 ret = __hci_request(hdev, hci_reset_req, 0,
855 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
856
857done:
1da177e4
LT
858 hci_req_unlock(hdev);
859 hci_dev_put(hdev);
860 return ret;
861}
862
863int hci_dev_reset_stat(__u16 dev)
864{
865 struct hci_dev *hdev;
866 int ret = 0;
867
70f23020
AE
868 hdev = hci_dev_get(dev);
869 if (!hdev)
1da177e4
LT
870 return -ENODEV;
871
872 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
873
874 hci_dev_put(hdev);
875
876 return ret;
877}
878
879int hci_dev_cmd(unsigned int cmd, void __user *arg)
880{
881 struct hci_dev *hdev;
882 struct hci_dev_req dr;
883 int err = 0;
884
885 if (copy_from_user(&dr, arg, sizeof(dr)))
886 return -EFAULT;
887
70f23020
AE
888 hdev = hci_dev_get(dr.dev_id);
889 if (!hdev)
1da177e4
LT
890 return -ENODEV;
891
892 switch (cmd) {
893 case HCISETAUTH:
04837f64
MH
894 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
895 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
896 break;
897
898 case HCISETENCRYPT:
899 if (!lmp_encrypt_capable(hdev)) {
900 err = -EOPNOTSUPP;
901 break;
902 }
903
904 if (!test_bit(HCI_AUTH, &hdev->flags)) {
905 /* Auth must be enabled first */
04837f64
MH
906 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
907 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
908 if (err)
909 break;
910 }
911
04837f64
MH
912 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
913 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
914 break;
915
916 case HCISETSCAN:
04837f64
MH
917 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
918 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
919 break;
920
1da177e4 921 case HCISETLINKPOL:
e4e8e37c
MH
922 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
923 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
924 break;
925
926 case HCISETLINKMODE:
e4e8e37c
MH
927 hdev->link_mode = ((__u16) dr.dev_opt) &
928 (HCI_LM_MASTER | HCI_LM_ACCEPT);
929 break;
930
931 case HCISETPTYPE:
932 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
933 break;
934
935 case HCISETACLMTU:
e4e8e37c
MH
936 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
937 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
938 break;
939
940 case HCISETSCOMTU:
e4e8e37c
MH
941 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
942 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
943 break;
944
945 default:
946 err = -EINVAL;
947 break;
948 }
e4e8e37c 949
1da177e4
LT
950 hci_dev_put(hdev);
951 return err;
952}
953
954int hci_get_dev_list(void __user *arg)
955{
8035ded4 956 struct hci_dev *hdev;
1da177e4
LT
957 struct hci_dev_list_req *dl;
958 struct hci_dev_req *dr;
1da177e4
LT
959 int n = 0, size, err;
960 __u16 dev_num;
961
962 if (get_user(dev_num, (__u16 __user *) arg))
963 return -EFAULT;
964
965 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
966 return -EINVAL;
967
968 size = sizeof(*dl) + dev_num * sizeof(*dr);
969
70f23020
AE
970 dl = kzalloc(size, GFP_KERNEL);
971 if (!dl)
1da177e4
LT
972 return -ENOMEM;
973
974 dr = dl->dev_req;
975
f20d09d5 976 read_lock(&hci_dev_list_lock);
8035ded4 977 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 978 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 979 cancel_delayed_work(&hdev->power_off);
c542a06c 980
a8b2d5c2
JH
981 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
982 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 983
1da177e4
LT
984 (dr + n)->dev_id = hdev->id;
985 (dr + n)->dev_opt = hdev->flags;
c542a06c 986
1da177e4
LT
987 if (++n >= dev_num)
988 break;
989 }
f20d09d5 990 read_unlock(&hci_dev_list_lock);
1da177e4
LT
991
992 dl->dev_num = n;
993 size = sizeof(*dl) + n * sizeof(*dr);
994
995 err = copy_to_user(arg, dl, size);
996 kfree(dl);
997
998 return err ? -EFAULT : 0;
999}
1000
1001int hci_get_dev_info(void __user *arg)
1002{
1003 struct hci_dev *hdev;
1004 struct hci_dev_info di;
1005 int err = 0;
1006
1007 if (copy_from_user(&di, arg, sizeof(di)))
1008 return -EFAULT;
1009
70f23020
AE
1010 hdev = hci_dev_get(di.dev_id);
1011 if (!hdev)
1da177e4
LT
1012 return -ENODEV;
1013
a8b2d5c2 1014 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1015 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1016
a8b2d5c2
JH
1017 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1018 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1019
1da177e4
LT
1020 strcpy(di.name, hdev->name);
1021 di.bdaddr = hdev->bdaddr;
943da25d 1022 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1023 di.flags = hdev->flags;
1024 di.pkt_type = hdev->pkt_type;
1025 di.acl_mtu = hdev->acl_mtu;
1026 di.acl_pkts = hdev->acl_pkts;
1027 di.sco_mtu = hdev->sco_mtu;
1028 di.sco_pkts = hdev->sco_pkts;
1029 di.link_policy = hdev->link_policy;
1030 di.link_mode = hdev->link_mode;
1031
1032 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1033 memcpy(&di.features, &hdev->features, sizeof(di.features));
1034
1035 if (copy_to_user(arg, &di, sizeof(di)))
1036 err = -EFAULT;
1037
1038 hci_dev_put(hdev);
1039
1040 return err;
1041}
1042
1043/* ---- Interface to HCI drivers ---- */
1044
611b30f7
MH
1045static int hci_rfkill_set_block(void *data, bool blocked)
1046{
1047 struct hci_dev *hdev = data;
1048
1049 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1050
1051 if (!blocked)
1052 return 0;
1053
1054 hci_dev_do_close(hdev);
1055
1056 return 0;
1057}
1058
1059static const struct rfkill_ops hci_rfkill_ops = {
1060 .set_block = hci_rfkill_set_block,
1061};
1062
1da177e4
LT
1063/* Alloc HCI device */
1064struct hci_dev *hci_alloc_dev(void)
1065{
1066 struct hci_dev *hdev;
1067
25ea6db0 1068 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1069 if (!hdev)
1070 return NULL;
1071
0ac7e700 1072 hci_init_sysfs(hdev);
1da177e4
LT
1073 skb_queue_head_init(&hdev->driver_init);
1074
1075 return hdev;
1076}
1077EXPORT_SYMBOL(hci_alloc_dev);
1078
1079/* Free HCI device */
1080void hci_free_dev(struct hci_dev *hdev)
1081{
1082 skb_queue_purge(&hdev->driver_init);
1083
a91f2e39
MH
1084 /* will free via device release */
1085 put_device(&hdev->dev);
1da177e4
LT
1086}
1087EXPORT_SYMBOL(hci_free_dev);
1088
ab81cbf9
JH
1089static void hci_power_on(struct work_struct *work)
1090{
1091 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1092
1093 BT_DBG("%s", hdev->name);
1094
1095 if (hci_dev_open(hdev->id) < 0)
1096 return;
1097
a8b2d5c2 1098 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1099 schedule_delayed_work(&hdev->power_off,
3243553f 1100 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1101
a8b2d5c2 1102 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1103 mgmt_index_added(hdev);
ab81cbf9
JH
1104}
1105
1106static void hci_power_off(struct work_struct *work)
1107{
3243553f
JH
1108 struct hci_dev *hdev = container_of(work, struct hci_dev,
1109 power_off.work);
ab81cbf9
JH
1110
1111 BT_DBG("%s", hdev->name);
1112
8ee56540 1113 hci_dev_do_close(hdev);
ab81cbf9
JH
1114}
1115
16ab91ab
JH
1116static void hci_discov_off(struct work_struct *work)
1117{
1118 struct hci_dev *hdev;
1119 u8 scan = SCAN_PAGE;
1120
1121 hdev = container_of(work, struct hci_dev, discov_off.work);
1122
1123 BT_DBG("%s", hdev->name);
1124
09fd0de5 1125 hci_dev_lock(hdev);
16ab91ab
JH
1126
1127 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1128
1129 hdev->discov_timeout = 0;
1130
09fd0de5 1131 hci_dev_unlock(hdev);
16ab91ab
JH
1132}
1133
2aeb9a1a
JH
1134int hci_uuids_clear(struct hci_dev *hdev)
1135{
1136 struct list_head *p, *n;
1137
1138 list_for_each_safe(p, n, &hdev->uuids) {
1139 struct bt_uuid *uuid;
1140
1141 uuid = list_entry(p, struct bt_uuid, list);
1142
1143 list_del(p);
1144 kfree(uuid);
1145 }
1146
1147 return 0;
1148}
1149
55ed8ca1
JH
1150int hci_link_keys_clear(struct hci_dev *hdev)
1151{
1152 struct list_head *p, *n;
1153
1154 list_for_each_safe(p, n, &hdev->link_keys) {
1155 struct link_key *key;
1156
1157 key = list_entry(p, struct link_key, list);
1158
1159 list_del(p);
1160 kfree(key);
1161 }
1162
1163 return 0;
1164}
1165
b899efaf
VCG
1166int hci_smp_ltks_clear(struct hci_dev *hdev)
1167{
1168 struct smp_ltk *k, *tmp;
1169
1170 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1171 list_del(&k->list);
1172 kfree(k);
1173 }
1174
1175 return 0;
1176}
1177
55ed8ca1
JH
1178struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1179{
8035ded4 1180 struct link_key *k;
55ed8ca1 1181
8035ded4 1182 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1183 if (bacmp(bdaddr, &k->bdaddr) == 0)
1184 return k;
55ed8ca1
JH
1185
1186 return NULL;
1187}
1188
d25e28ab
JH
/* Decide whether a newly created link key should be stored
 * persistently. Returns 1 to keep the key, 0 to drop it after use.
 * The checks are evaluated in order; earlier rules win.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1224
c9839a11 1225struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1226{
c9839a11 1227 struct smp_ltk *k;
75d262c2 1228
c9839a11
VCG
1229 list_for_each_entry(k, &hdev->long_term_keys, list) {
1230 if (k->ediv != ediv ||
1231 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1232 continue;
1233
c9839a11 1234 return k;
75d262c2
VCG
1235 }
1236
1237 return NULL;
1238}
1239EXPORT_SYMBOL(hci_find_ltk);
1240
c9839a11
VCG
1241struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1242 u8 addr_type)
75d262c2 1243{
c9839a11 1244 struct smp_ltk *k;
75d262c2 1245
c9839a11
VCG
1246 list_for_each_entry(k, &hdev->long_term_keys, list)
1247 if (addr_type == k->bdaddr_type &&
1248 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1249 return k;
1250
1251 return NULL;
1252}
c9839a11 1253EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1254
d25e28ab
JH
/* Store (or update) the link key for @bdaddr.
 *
 * An existing entry for the address is reused, otherwise a new one is
 * allocated and linked in. When @new_key is set the management
 * interface is notified, and non-persistent keys (see
 * hci_persistent_key()) are removed again right after the
 * notification. Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff == "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Notification first, then drop keys that must not be stored. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1309
c9839a11
VCG
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type keys are accepted; anything else is silently
 * ignored (returns 0). An existing entry for the address is
 * overwritten in place. mgmt is notified only for genuinely new LTKs.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1346
55ed8ca1
JH
1347int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1348{
1349 struct link_key *key;
1350
1351 key = hci_find_link_key(hdev, bdaddr);
1352 if (!key)
1353 return -ENOENT;
1354
1355 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1356
1357 list_del(&key->list);
1358 kfree(key);
1359
1360 return 0;
1361}
1362
b899efaf
VCG
1363int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1364{
1365 struct smp_ltk *k, *tmp;
1366
1367 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1368 if (bacmp(bdaddr, &k->bdaddr))
1369 continue;
1370
1371 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1372
1373 list_del(&k->list);
1374 kfree(k);
1375 }
1376
1377 return 0;
1378}
1379
6bd32326
VT
1380/* HCI command timer function */
1381static void hci_cmd_timer(unsigned long arg)
1382{
1383 struct hci_dev *hdev = (void *) arg;
1384
1385 BT_ERR("%s command tx timeout", hdev->name);
1386 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1387 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1388}
1389
2763eda6
SJ
1390struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1391 bdaddr_t *bdaddr)
1392{
1393 struct oob_data *data;
1394
1395 list_for_each_entry(data, &hdev->remote_oob_data, list)
1396 if (bacmp(bdaddr, &data->bdaddr) == 0)
1397 return data;
1398
1399 return NULL;
1400}
1401
1402int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1403{
1404 struct oob_data *data;
1405
1406 data = hci_find_remote_oob_data(hdev, bdaddr);
1407 if (!data)
1408 return -ENOENT;
1409
1410 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1411
1412 list_del(&data->list);
1413 kfree(data);
1414
1415 return 0;
1416}
1417
1418int hci_remote_oob_data_clear(struct hci_dev *hdev)
1419{
1420 struct oob_data *data, *n;
1421
1422 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1423 list_del(&data->list);
1424 kfree(data);
1425 }
1426
1427 return 0;
1428}
1429
1430int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1431 u8 *randomizer)
1432{
1433 struct oob_data *data;
1434
1435 data = hci_find_remote_oob_data(hdev, bdaddr);
1436
1437 if (!data) {
1438 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1439 if (!data)
1440 return -ENOMEM;
1441
1442 bacpy(&data->bdaddr, bdaddr);
1443 list_add(&data->list, &hdev->remote_oob_data);
1444 }
1445
1446 memcpy(data->hash, hash, sizeof(data->hash));
1447 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1448
1449 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1450
1451 return 0;
1452}
1453
b2a66aad
AJ
1454struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1455 bdaddr_t *bdaddr)
1456{
8035ded4 1457 struct bdaddr_list *b;
b2a66aad 1458
8035ded4 1459 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1460 if (bacmp(bdaddr, &b->bdaddr) == 0)
1461 return b;
b2a66aad
AJ
1462
1463 return NULL;
1464}
1465
1466int hci_blacklist_clear(struct hci_dev *hdev)
1467{
1468 struct list_head *p, *n;
1469
1470 list_for_each_safe(p, n, &hdev->blacklist) {
1471 struct bdaddr_list *b;
1472
1473 b = list_entry(p, struct bdaddr_list, list);
1474
1475 list_del(p);
1476 kfree(b);
1477 }
1478
1479 return 0;
1480}
1481
88c1fe4b 1482int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1483{
1484 struct bdaddr_list *entry;
b2a66aad
AJ
1485
1486 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1487 return -EBADF;
1488
5e762444
AJ
1489 if (hci_blacklist_lookup(hdev, bdaddr))
1490 return -EEXIST;
b2a66aad
AJ
1491
1492 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1493 if (!entry)
1494 return -ENOMEM;
b2a66aad
AJ
1495
1496 bacpy(&entry->bdaddr, bdaddr);
1497
1498 list_add(&entry->list, &hdev->blacklist);
1499
88c1fe4b 1500 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1501}
1502
88c1fe4b 1503int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1504{
1505 struct bdaddr_list *entry;
b2a66aad 1506
1ec918ce 1507 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1508 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1509
1510 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1511 if (!entry)
5e762444 1512 return -ENOENT;
b2a66aad
AJ
1513
1514 list_del(&entry->list);
1515 kfree(entry);
1516
88c1fe4b 1517 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1518}
1519
db323f2f 1520static void hci_clear_adv_cache(struct work_struct *work)
35815085 1521{
db323f2f
GP
1522 struct hci_dev *hdev = container_of(work, struct hci_dev,
1523 adv_work.work);
35815085
AG
1524
1525 hci_dev_lock(hdev);
1526
1527 hci_adv_entries_clear(hdev);
1528
1529 hci_dev_unlock(hdev);
1530}
1531
76c8686f
AG
1532int hci_adv_entries_clear(struct hci_dev *hdev)
1533{
1534 struct adv_entry *entry, *tmp;
1535
1536 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1537 list_del(&entry->list);
1538 kfree(entry);
1539 }
1540
1541 BT_DBG("%s adv cache cleared", hdev->name);
1542
1543 return 0;
1544}
1545
1546struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1547{
1548 struct adv_entry *entry;
1549
1550 list_for_each_entry(entry, &hdev->adv_entries, list)
1551 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1552 return entry;
1553
1554 return NULL;
1555}
1556
1557static inline int is_connectable_adv(u8 evt_type)
1558{
1559 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1560 return 1;
1561
1562 return 0;
1563}
1564
1565int hci_add_adv_entry(struct hci_dev *hdev,
1566 struct hci_ev_le_advertising_info *ev)
1567{
1568 struct adv_entry *entry;
1569
1570 if (!is_connectable_adv(ev->evt_type))
1571 return -EINVAL;
1572
1573 /* Only new entries should be added to adv_entries. So, if
1574 * bdaddr was found, don't add it. */
1575 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1576 return 0;
1577
4777bfde 1578 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
76c8686f
AG
1579 if (!entry)
1580 return -ENOMEM;
1581
1582 bacpy(&entry->bdaddr, &ev->bdaddr);
1583 entry->bdaddr_type = ev->bdaddr_type;
1584
1585 list_add(&entry->list, &hdev->adv_entries);
1586
1587 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1588 batostr(&entry->bdaddr), entry->bdaddr_type);
1589
1590 return 0;
1591}
1592
7ba8b4be
AG
1593static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1594{
1595 struct le_scan_params *param = (struct le_scan_params *) opt;
1596 struct hci_cp_le_set_scan_param cp;
1597
1598 memset(&cp, 0, sizeof(cp));
1599 cp.type = param->type;
1600 cp.interval = cpu_to_le16(param->interval);
1601 cp.window = cpu_to_le16(param->window);
1602
1603 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1604}
1605
1606static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1607{
1608 struct hci_cp_le_set_scan_enable cp;
1609
1610 memset(&cp, 0, sizeof(cp));
1611 cp.enable = 1;
1612
1613 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1614}
1615
/* Synchronously program and enable an LE scan, then schedule the
 * delayed work that will disable it after @timeout ms. Returns
 * -EINPROGRESS if a scan is already running, otherwise the
 * __hci_request() result. Must not be called with req_lock held.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	/* 3 s budget for each of the two controller requests below. */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters first, then enable — order mandated by HCI. */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
								timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1649
1650static void le_scan_disable_work(struct work_struct *work)
1651{
1652 struct hci_dev *hdev = container_of(work, struct hci_dev,
1653 le_scan_disable.work);
1654 struct hci_cp_le_set_scan_enable cp;
1655
1656 BT_DBG("%s", hdev->name);
1657
1658 memset(&cp, 0, sizeof(cp));
1659
1660 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1661}
1662
28b75a89
AG
1663static void le_scan_work(struct work_struct *work)
1664{
1665 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1666 struct le_scan_params *param = &hdev->le_scan_params;
1667
1668 BT_DBG("%s", hdev->name);
1669
1670 hci_do_le_scan(hdev, param->type, param->interval,
1671 param->window, param->timeout);
1672}
1673
1674int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1675 int timeout)
1676{
1677 struct le_scan_params *param = &hdev->le_scan_params;
1678
1679 BT_DBG("%s", hdev->name);
1680
1681 if (work_busy(&hdev->le_scan))
1682 return -EINPROGRESS;
1683
1684 param->type = type;
1685 param->interval = interval;
1686 param->window = window;
1687 param->timeout = timeout;
1688
1689 queue_work(system_long_wq, &hdev->le_scan);
1690
1691 return 0;
1692}
1693
1da177e4
LT
/* Register HCI device.
 *
 * Picks the first free index (AMP controllers never get index 0, so
 * the index doubles as the AMP controller ID), initializes all
 * per-device state, creates the workqueue and sysfs/rfkill hooks and
 * finally schedules the deferred power-on. Returns the assigned id or
 * a negative errno; on failure the device is unlinked again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver without open/close callbacks is unusable. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Conservative defaults until the controller reports its real
	 * capabilities during init. */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal — run without it. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above under the same lock. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1825
1826/* Unregister HCI device */
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up, in reverse:
 * unlink from the global list, close the device, free reassembly
 * buffers, notify mgmt (unless still in setup), drop rfkill/sysfs,
 * flush delayed work, destroy the workqueue, clear all stored state
 * and finally drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1878
1879/* Suspend HCI device */
1880int hci_suspend_dev(struct hci_dev *hdev)
1881{
1882 hci_notify(hdev, HCI_DEV_SUSPEND);
1883 return 0;
1884}
1885EXPORT_SYMBOL(hci_suspend_dev);
1886
1887/* Resume HCI device */
1888int hci_resume_dev(struct hci_dev *hdev)
1889{
1890 hci_notify(hdev, HCI_DEV_RESUME);
1891 return 0;
1892}
1893EXPORT_SYMBOL(hci_resume_dev);
1894
76bca880
MH
1895/* Receive frame from HCI drivers */
1896int hci_recv_frame(struct sk_buff *skb)
1897{
1898 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1899 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1900 && !test_bit(HCI_INIT, &hdev->flags))) {
1901 kfree_skb(skb);
1902 return -ENXIO;
1903 }
1904
1905 /* Incomming skb */
1906 bt_cb(skb)->incoming = 1;
1907
1908 /* Time stamp */
1909 __net_timestamp(skb);
1910
76bca880 1911 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1912 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1913
76bca880
MH
1914 return 0;
1915}
1916EXPORT_SYMBOL(hci_recv_frame);
1917
/* Incrementally reassemble a full HCI packet from a byte stream.
 *
 * @index selects which of hdev->reassembly[] slots carries the
 * partial packet. Bytes from @data are appended until the header is
 * complete, then until the payload length announced by that header is
 * reached, at which point the finished skb is fed to hci_recv_frame()
 * and the slot is reset. Returns the number of unconsumed bytes, or a
 * negative errno (-EILSEQ for bad type/index, -ENOMEM on allocation
 * or oversized-payload failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest possible
		 * frame for this packet type and expect its header. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Consume no more than what the current stage expects. */
		len = min_t(__u16, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the payload length and
		 * bail out if it cannot fit in the allocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2026
/* Feed a driver-provided fragment of a typed HCI packet into the
 * per-type reassembly slot (slot index = type - 1). Loops until all
 * @count bytes are consumed or hci_reassembly() reports an error.
 * Returns the final leftover count (>= 0) or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Skip the consumed prefix and retry with the rest. */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2046
/* Dedicated reassembly slot for untyped byte streams. */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes (e.g. from a UART driver) into reassembly.
 * When no packet is in progress the first byte is the packet type
 * indicator; otherwise the type of the partial packet is reused.
 * Returns leftover byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip the consumed prefix and retry with the rest. */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2081
1da177e4
LT
2082/* ---- Interface to upper protocols ---- */
2083
1da177e4
LT
2084int hci_register_cb(struct hci_cb *cb)
2085{
2086 BT_DBG("%p name %s", cb, cb->name);
2087
f20d09d5 2088 write_lock(&hci_cb_list_lock);
1da177e4 2089 list_add(&cb->list, &hci_cb_list);
f20d09d5 2090 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2091
2092 return 0;
2093}
2094EXPORT_SYMBOL(hci_register_cb);
2095
2096int hci_unregister_cb(struct hci_cb *cb)
2097{
2098 BT_DBG("%p name %s", cb, cb->name);
2099
f20d09d5 2100 write_lock(&hci_cb_list_lock);
1da177e4 2101 list_del(&cb->list);
f20d09d5 2102 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2103
2104 return 0;
2105}
2106EXPORT_SYMBOL(hci_unregister_cb);
2107
2108static int hci_send_frame(struct sk_buff *skb)
2109{
2110 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2111
2112 if (!hdev) {
2113 kfree_skb(skb);
2114 return -ENODEV;
2115 }
2116
0d48d939 2117 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2118
cd82e61c
MH
2119 /* Time stamp */
2120 __net_timestamp(skb);
1da177e4 2121
cd82e61c
MH
2122 /* Send copy to monitor */
2123 hci_send_to_monitor(hdev, skb);
2124
2125 if (atomic_read(&hdev->promisc)) {
2126 /* Send copy to the sockets */
470fe1b5 2127 hci_send_to_sock(hdev, skb);
1da177e4
LT
2128 }
2129
2130 /* Get rid of skb owner, prior to sending to the driver. */
2131 skb_orphan(skb);
2132
2133 return hdev->send(skb);
2134}
2135
2136/* Send HCI command */
a9de9248 2137int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2138{
2139 int len = HCI_COMMAND_HDR_SIZE + plen;
2140 struct hci_command_hdr *hdr;
2141 struct sk_buff *skb;
2142
a9de9248 2143 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2144
2145 skb = bt_skb_alloc(len, GFP_ATOMIC);
2146 if (!skb) {
ef222013 2147 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2148 return -ENOMEM;
2149 }
2150
2151 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2152 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2153 hdr->plen = plen;
2154
2155 if (plen)
2156 memcpy(skb_put(skb, plen), param, plen);
2157
2158 BT_DBG("skb len %d", skb->len);
2159
0d48d939 2160 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2161 skb->dev = (void *) hdev;
c78ae283 2162
a5040efa
JH
2163 if (test_bit(HCI_INIT, &hdev->flags))
2164 hdev->init_last_cmd = opcode;
2165
1da177e4 2166 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2167 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2168
2169 return 0;
2170}
1da177e4
LT
2171
2172/* Get data from the previously sent command */
a9de9248 2173void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2174{
2175 struct hci_command_hdr *hdr;
2176
2177 if (!hdev->sent_cmd)
2178 return NULL;
2179
2180 hdr = (void *) hdev->sent_cmd->data;
2181
a9de9248 2182 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2183 return NULL;
2184
a9de9248 2185 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2186
2187 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2188}
2189
2190/* Send ACL data */
2191static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2192{
2193 struct hci_acl_hdr *hdr;
2194 int len = skb->len;
2195
badff6d0
ACM
2196 skb_push(skb, HCI_ACL_HDR_SIZE);
2197 skb_reset_transport_header(skb);
9c70220b 2198 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2199 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2200 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2201}
2202
/* Queue an ACL skb (plus any fragments on its frag_list) for
 * transmission. The first fragment keeps the caller's flags; the
 * continuation fragments get ACL_START replaced by ACL_CONT and their
 * own ACL headers. All fragments are queued under the queue lock so
 * they stay contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are continuations. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2243
2244void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2245{
2246 struct hci_conn *conn = chan->conn;
2247 struct hci_dev *hdev = conn->hdev;
2248
2249 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2250
2251 skb->dev = (void *) hdev;
2252 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2253 hci_add_acl_hdr(skb, conn->handle, flags);
2254
2255 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2256
3eff45ea 2257 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2258}
2259EXPORT_SYMBOL(hci_send_acl);
2260
2261/* Send SCO data */
0d861d8b 2262void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2263{
2264 struct hci_dev *hdev = conn->hdev;
2265 struct hci_sco_hdr hdr;
2266
2267 BT_DBG("%s len %d", hdev->name, skb->len);
2268
aca3192c 2269 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2270 hdr.dlen = skb->len;
2271
badff6d0
ACM
2272 skb_push(skb, HCI_SCO_HDR_SIZE);
2273 skb_reset_transport_header(skb);
9c70220b 2274 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2275
2276 skb->dev = (void *) hdev;
0d48d939 2277 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2278
1da177e4 2279 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2280 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2281}
2282EXPORT_SYMBOL(hci_send_sco);
2283
2284/* ---- HCI TX task (outgoing data) ---- */
2285
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets, and compute its fair-share quote (controller
 * buffer credits divided by the number of eligible connections, at
 * least 1). Sets *quote to 0 when nothing is eligible.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded eligible connection. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer credits per link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2345
bae1f5d9 2346static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2347{
2348 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2349 struct hci_conn *c;
1da177e4 2350
bae1f5d9 2351 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2352
bf4c6325
GP
2353 rcu_read_lock();
2354
1da177e4 2355 /* Kill stalled connections */
bf4c6325 2356 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2357 if (c->type == type && c->sent) {
2358 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2359 hdev->name, batostr(&c->dst));
2360 hci_acl_disconn(c, 0x13);
2361 }
2362 }
bf4c6325
GP
2363
2364 rcu_read_unlock();
1da177e4
LT
2365}
2366
73d80deb
LAD
2367static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2368 int *quote)
1da177e4 2369{
73d80deb
LAD
2370 struct hci_conn_hash *h = &hdev->conn_hash;
2371 struct hci_chan *chan = NULL;
2372 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2373 struct hci_conn *conn;
73d80deb
LAD
2374 int cnt, q, conn_num = 0;
2375
2376 BT_DBG("%s", hdev->name);
2377
bf4c6325
GP
2378 rcu_read_lock();
2379
2380 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2381 struct hci_chan *tmp;
2382
2383 if (conn->type != type)
2384 continue;
2385
2386 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2387 continue;
2388
2389 conn_num++;
2390
8192edef 2391 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2392 struct sk_buff *skb;
2393
2394 if (skb_queue_empty(&tmp->data_q))
2395 continue;
2396
2397 skb = skb_peek(&tmp->data_q);
2398 if (skb->priority < cur_prio)
2399 continue;
2400
2401 if (skb->priority > cur_prio) {
2402 num = 0;
2403 min = ~0;
2404 cur_prio = skb->priority;
2405 }
2406
2407 num++;
2408
2409 if (conn->sent < min) {
2410 min = conn->sent;
2411 chan = tmp;
2412 }
2413 }
2414
2415 if (hci_conn_num(hdev, type) == conn_num)
2416 break;
2417 }
2418
bf4c6325
GP
2419 rcu_read_unlock();
2420
73d80deb
LAD
2421 if (!chan)
2422 return NULL;
2423
2424 switch (chan->conn->type) {
2425 case ACL_LINK:
2426 cnt = hdev->acl_cnt;
2427 break;
2428 case SCO_LINK:
2429 case ESCO_LINK:
2430 cnt = hdev->sco_cnt;
2431 break;
2432 case LE_LINK:
2433 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2434 break;
2435 default:
2436 cnt = 0;
2437 BT_ERR("Unknown link type");
2438 }
2439
2440 q = cnt / num;
2441 *quote = q ? q : 1;
2442 BT_DBG("chan %p quote %d", chan, *quote);
2443 return chan;
2444}
2445
02b20f0b
LAD
2446static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2447{
2448 struct hci_conn_hash *h = &hdev->conn_hash;
2449 struct hci_conn *conn;
2450 int num = 0;
2451
2452 BT_DBG("%s", hdev->name);
2453
bf4c6325
GP
2454 rcu_read_lock();
2455
2456 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2457 struct hci_chan *chan;
2458
2459 if (conn->type != type)
2460 continue;
2461
2462 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2463 continue;
2464
2465 num++;
2466
8192edef 2467 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2468 struct sk_buff *skb;
2469
2470 if (chan->sent) {
2471 chan->sent = 0;
2472 continue;
2473 }
2474
2475 if (skb_queue_empty(&chan->data_q))
2476 continue;
2477
2478 skb = skb_peek(&chan->data_q);
2479 if (skb->priority >= HCI_PRIO_MAX - 1)
2480 continue;
2481
2482 skb->priority = HCI_PRIO_MAX - 1;
2483
2484 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2485 skb->priority);
2486 }
2487
2488 if (hci_conn_num(hdev, type) == num)
2489 break;
2490 }
bf4c6325
GP
2491
2492 rcu_read_unlock();
2493
02b20f0b
LAD
2494}
2495
b71d385a
AE
2496static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2497{
2498 /* Calculate count of blocks used by this packet */
2499 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2500}
2501
63d2bc1b 2502static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2503{
1da177e4
LT
2504 if (!test_bit(HCI_RAW, &hdev->flags)) {
2505 /* ACL tx timeout must be longer than maximum
2506 * link supervision timeout (40.9 seconds) */
63d2bc1b 2507 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2508 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2509 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2510 }
63d2bc1b 2511}
1da177e4 2512
63d2bc1b
AE
2513static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2514{
2515 unsigned int cnt = hdev->acl_cnt;
2516 struct hci_chan *chan;
2517 struct sk_buff *skb;
2518 int quote;
2519
2520 __check_timeout(hdev, cnt);
04837f64 2521
73d80deb
LAD
2522 while (hdev->acl_cnt &&
2523 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2524 u32 priority = (skb_peek(&chan->data_q))->priority;
2525 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority);
2528
ec1cce24
LAD
2529 /* Stop if priority has changed */
2530 if (skb->priority < priority)
2531 break;
2532
2533 skb = skb_dequeue(&chan->data_q);
2534
73d80deb
LAD
2535 hci_conn_enter_active_mode(chan->conn,
2536 bt_cb(skb)->force_active);
04837f64 2537
1da177e4
LT
2538 hci_send_frame(skb);
2539 hdev->acl_last_tx = jiffies;
2540
2541 hdev->acl_cnt--;
73d80deb
LAD
2542 chan->sent++;
2543 chan->conn->sent++;
1da177e4
LT
2544 }
2545 }
02b20f0b
LAD
2546
2547 if (cnt != hdev->acl_cnt)
2548 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2549}
2550
b71d385a
AE
2551static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2552{
63d2bc1b 2553 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2554 struct hci_chan *chan;
2555 struct sk_buff *skb;
2556 int quote;
b71d385a 2557
63d2bc1b 2558 __check_timeout(hdev, cnt);
b71d385a
AE
2559
2560 while (hdev->block_cnt > 0 &&
2561 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2562 u32 priority = (skb_peek(&chan->data_q))->priority;
2563 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2564 int blocks;
2565
2566 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2567 skb->len, skb->priority);
2568
2569 /* Stop if priority has changed */
2570 if (skb->priority < priority)
2571 break;
2572
2573 skb = skb_dequeue(&chan->data_q);
2574
2575 blocks = __get_blocks(hdev, skb);
2576 if (blocks > hdev->block_cnt)
2577 return;
2578
2579 hci_conn_enter_active_mode(chan->conn,
2580 bt_cb(skb)->force_active);
2581
2582 hci_send_frame(skb);
2583 hdev->acl_last_tx = jiffies;
2584
2585 hdev->block_cnt -= blocks;
2586 quote -= blocks;
2587
2588 chan->sent += blocks;
2589 chan->conn->sent += blocks;
2590 }
2591 }
2592
2593 if (cnt != hdev->block_cnt)
2594 hci_prio_recalculate(hdev, ACL_LINK);
2595}
2596
2597static inline void hci_sched_acl(struct hci_dev *hdev)
2598{
2599 BT_DBG("%s", hdev->name);
2600
2601 if (!hci_conn_num(hdev, ACL_LINK))
2602 return;
2603
2604 switch (hdev->flow_ctl_mode) {
2605 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2606 hci_sched_acl_pkt(hdev);
2607 break;
2608
2609 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2610 hci_sched_acl_blk(hdev);
2611 break;
2612 }
2613}
2614
1da177e4
LT
2615/* Schedule SCO */
2616static inline void hci_sched_sco(struct hci_dev *hdev)
2617{
2618 struct hci_conn *conn;
2619 struct sk_buff *skb;
2620 int quote;
2621
2622 BT_DBG("%s", hdev->name);
2623
52087a79
LAD
2624 if (!hci_conn_num(hdev, SCO_LINK))
2625 return;
2626
1da177e4
LT
2627 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2628 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2629 BT_DBG("skb %p len %d", skb, skb->len);
2630 hci_send_frame(skb);
2631
2632 conn->sent++;
2633 if (conn->sent == ~0)
2634 conn->sent = 0;
2635 }
2636 }
2637}
2638
b6a0dc82
MH
2639static inline void hci_sched_esco(struct hci_dev *hdev)
2640{
2641 struct hci_conn *conn;
2642 struct sk_buff *skb;
2643 int quote;
2644
2645 BT_DBG("%s", hdev->name);
2646
52087a79
LAD
2647 if (!hci_conn_num(hdev, ESCO_LINK))
2648 return;
2649
b6a0dc82
MH
2650 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2651 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2652 BT_DBG("skb %p len %d", skb, skb->len);
2653 hci_send_frame(skb);
2654
2655 conn->sent++;
2656 if (conn->sent == ~0)
2657 conn->sent = 0;
2658 }
2659 }
2660}
2661
6ed58ec5
VT
2662static inline void hci_sched_le(struct hci_dev *hdev)
2663{
73d80deb 2664 struct hci_chan *chan;
6ed58ec5 2665 struct sk_buff *skb;
02b20f0b 2666 int quote, cnt, tmp;
6ed58ec5
VT
2667
2668 BT_DBG("%s", hdev->name);
2669
52087a79
LAD
2670 if (!hci_conn_num(hdev, LE_LINK))
2671 return;
2672
6ed58ec5
VT
2673 if (!test_bit(HCI_RAW, &hdev->flags)) {
2674 /* LE tx timeout must be longer than maximum
2675 * link supervision timeout (40.9 seconds) */
bae1f5d9 2676 if (!hdev->le_cnt && hdev->le_pkts &&
6ed58ec5 2677 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2678 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2679 }
2680
2681 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2682 tmp = cnt;
73d80deb 2683 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2684 u32 priority = (skb_peek(&chan->data_q))->priority;
2685 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2686 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2687 skb->len, skb->priority);
6ed58ec5 2688
ec1cce24
LAD
2689 /* Stop if priority has changed */
2690 if (skb->priority < priority)
2691 break;
2692
2693 skb = skb_dequeue(&chan->data_q);
2694
6ed58ec5
VT
2695 hci_send_frame(skb);
2696 hdev->le_last_tx = jiffies;
2697
2698 cnt--;
73d80deb
LAD
2699 chan->sent++;
2700 chan->conn->sent++;
6ed58ec5
VT
2701 }
2702 }
73d80deb 2703
6ed58ec5
VT
2704 if (hdev->le_pkts)
2705 hdev->le_cnt = cnt;
2706 else
2707 hdev->acl_cnt = cnt;
02b20f0b
LAD
2708
2709 if (cnt != tmp)
2710 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2711}
2712
3eff45ea 2713static void hci_tx_work(struct work_struct *work)
1da177e4 2714{
3eff45ea 2715 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2716 struct sk_buff *skb;
2717
6ed58ec5
VT
2718 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2719 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2720
2721 /* Schedule queues and send stuff to HCI driver */
2722
2723 hci_sched_acl(hdev);
2724
2725 hci_sched_sco(hdev);
2726
b6a0dc82
MH
2727 hci_sched_esco(hdev);
2728
6ed58ec5
VT
2729 hci_sched_le(hdev);
2730
1da177e4
LT
2731 /* Send next queued raw (unknown type) packet */
2732 while ((skb = skb_dequeue(&hdev->raw_q)))
2733 hci_send_frame(skb);
1da177e4
LT
2734}
2735
25985edc 2736/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2737
2738/* ACL data packet */
2739static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2740{
2741 struct hci_acl_hdr *hdr = (void *) skb->data;
2742 struct hci_conn *conn;
2743 __u16 handle, flags;
2744
2745 skb_pull(skb, HCI_ACL_HDR_SIZE);
2746
2747 handle = __le16_to_cpu(hdr->handle);
2748 flags = hci_flags(handle);
2749 handle = hci_handle(handle);
2750
2751 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2752
2753 hdev->stat.acl_rx++;
2754
2755 hci_dev_lock(hdev);
2756 conn = hci_conn_hash_lookup_handle(hdev, handle);
2757 hci_dev_unlock(hdev);
8e87d142 2758
1da177e4 2759 if (conn) {
65983fc7 2760 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2761
1da177e4 2762 /* Send to upper protocol */
686ebf28
UF
2763 l2cap_recv_acldata(conn, skb, flags);
2764 return;
1da177e4 2765 } else {
8e87d142 2766 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2767 hdev->name, handle);
2768 }
2769
2770 kfree_skb(skb);
2771}
2772
2773/* SCO data packet */
2774static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2775{
2776 struct hci_sco_hdr *hdr = (void *) skb->data;
2777 struct hci_conn *conn;
2778 __u16 handle;
2779
2780 skb_pull(skb, HCI_SCO_HDR_SIZE);
2781
2782 handle = __le16_to_cpu(hdr->handle);
2783
2784 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2785
2786 hdev->stat.sco_rx++;
2787
2788 hci_dev_lock(hdev);
2789 conn = hci_conn_hash_lookup_handle(hdev, handle);
2790 hci_dev_unlock(hdev);
2791
2792 if (conn) {
1da177e4 2793 /* Send to upper protocol */
686ebf28
UF
2794 sco_recv_scodata(conn, skb);
2795 return;
1da177e4 2796 } else {
8e87d142 2797 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2798 hdev->name, handle);
2799 }
2800
2801 kfree_skb(skb);
2802}
2803
b78752cc 2804static void hci_rx_work(struct work_struct *work)
1da177e4 2805{
b78752cc 2806 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2807 struct sk_buff *skb;
2808
2809 BT_DBG("%s", hdev->name);
2810
1da177e4 2811 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2812 /* Send copy to monitor */
2813 hci_send_to_monitor(hdev, skb);
2814
1da177e4
LT
2815 if (atomic_read(&hdev->promisc)) {
2816 /* Send copy to the sockets */
470fe1b5 2817 hci_send_to_sock(hdev, skb);
1da177e4
LT
2818 }
2819
2820 if (test_bit(HCI_RAW, &hdev->flags)) {
2821 kfree_skb(skb);
2822 continue;
2823 }
2824
2825 if (test_bit(HCI_INIT, &hdev->flags)) {
2826 /* Don't process data packets in this states. */
0d48d939 2827 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2828 case HCI_ACLDATA_PKT:
2829 case HCI_SCODATA_PKT:
2830 kfree_skb(skb);
2831 continue;
3ff50b79 2832 }
1da177e4
LT
2833 }
2834
2835 /* Process frame */
0d48d939 2836 switch (bt_cb(skb)->pkt_type) {
1da177e4 2837 case HCI_EVENT_PKT:
b78752cc 2838 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2839 hci_event_packet(hdev, skb);
2840 break;
2841
2842 case HCI_ACLDATA_PKT:
2843 BT_DBG("%s ACL data packet", hdev->name);
2844 hci_acldata_packet(hdev, skb);
2845 break;
2846
2847 case HCI_SCODATA_PKT:
2848 BT_DBG("%s SCO data packet", hdev->name);
2849 hci_scodata_packet(hdev, skb);
2850 break;
2851
2852 default:
2853 kfree_skb(skb);
2854 break;
2855 }
2856 }
1da177e4
LT
2857}
2858
c347b765 2859static void hci_cmd_work(struct work_struct *work)
1da177e4 2860{
c347b765 2861 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2862 struct sk_buff *skb;
2863
2864 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2865
1da177e4 2866 /* Send queued commands */
5a08ecce
AE
2867 if (atomic_read(&hdev->cmd_cnt)) {
2868 skb = skb_dequeue(&hdev->cmd_q);
2869 if (!skb)
2870 return;
2871
7585b97a 2872 kfree_skb(hdev->sent_cmd);
1da177e4 2873
70f23020
AE
2874 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2875 if (hdev->sent_cmd) {
1da177e4
LT
2876 atomic_dec(&hdev->cmd_cnt);
2877 hci_send_frame(skb);
7bdb8a5c
SJ
2878 if (test_bit(HCI_RESET, &hdev->flags))
2879 del_timer(&hdev->cmd_timer);
2880 else
2881 mod_timer(&hdev->cmd_timer,
6bd32326 2882 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2883 } else {
2884 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2885 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2886 }
2887 }
2888}
2519a1fc
AG
2889
2890int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2891{
2892 /* General inquiry access code (GIAC) */
2893 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2894 struct hci_cp_inquiry cp;
2895
2896 BT_DBG("%s", hdev->name);
2897
2898 if (test_bit(HCI_INQUIRY, &hdev->flags))
2899 return -EINPROGRESS;
2900
4663262c
JH
2901 inquiry_cache_flush(hdev);
2902
2519a1fc
AG
2903 memset(&cp, 0, sizeof(cp));
2904 memcpy(&cp.lap, lap, sizeof(cp.lap));
2905 cp.length = length;
2906
2907 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2908}
023d5049
AG
2909
2910int hci_cancel_inquiry(struct hci_dev *hdev)
2911{
2912 BT_DBG("%s", hdev->name);
2913
2914 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2915 return -EPERM;
2916
2917 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2918}