Bluetooth: mgmt: Implement Set LE command
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
b78752cc 57static void hci_rx_work(struct work_struct *work);
c347b765 58static void hci_cmd_work(struct work_struct *work);
3eff45ea 59static void hci_tx_work(struct work_struct *work);
1da177e4 60
1da177e4
LT
61/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
1da177e4
LT
69/* ---- HCI notifications ---- */
70
/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
23bb5763 78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 79{
23bb5763
JH
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
a5040efa
JH
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 86 return;
1da177e4
LT
87
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106/* Execute request and wait for completion. */
8e87d142 107static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 108 unsigned long opt, __u32 timeout)
1da177e4
LT
109{
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
e175072f 130 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
3ff50b79 140 }
1da177e4 141
a5040efa 142 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 150 unsigned long opt, __u32 timeout)
1da177e4
LT
151{
152 int ret;
153
7c6a329e
MH
154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
1da177e4
LT
157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
165static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166{
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
f630cf0d 170 set_bit(HCI_RESET, &hdev->flags);
a9de9248 171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
172}
173
e61ef499 174static void bredr_init(struct hci_dev *hdev)
1da177e4 175{
b0916ea0 176 struct hci_cp_delete_stored_link_key cp;
1ebb9252 177 __le16 param;
89f2783d 178 __u8 flt_type;
1da177e4 179
2455a3ea
AE
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
1da177e4
LT
182 /* Mandatory initialization */
183
184 /* Reset */
f630cf0d 185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 188 }
1da177e4
LT
189
190 /* Read Local Supported Features */
a9de9248 191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 192
1143e5a6 193 /* Read Local Version */
a9de9248 194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 195
1da177e4 196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 198
1da177e4 199 /* Read BD Address */
a9de9248
MH
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
207
208 /* Read Voice Setting */
a9de9248 209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
89f2783d 214 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 216
1da177e4 217 /* Connection accept timeout ~20 secs */
aca3192c 218 param = cpu_to_le16(0x7d00);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
224}
225
e61ef499
AE
226static void amp_init(struct hci_dev *hdev)
227{
2455a3ea
AE
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
e61ef499
AE
230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235}
236
237static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238{
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269}
270
6ed58ec5
VT
271static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272{
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277}
278
1da177e4
LT
279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
a9de9248 286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
e4e8e37c 305 /* Encryption */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
307}
308
e4e8e37c
MH
309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
a418b893 313 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
8e87d142 319/* Get HCI device by index.
1da177e4
LT
320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
8035ded4 323 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
8035ded4 331 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
1da177e4
LT
340
341/* ---- Inquiry support ---- */
ff9ef578 342
30dc78e1
JH
343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
6fbe195d 347 switch (discov->state) {
343f935b 348 case DISCOVERY_FINDING:
6fbe195d 349 case DISCOVERY_RESOLVING:
30dc78e1
JH
350 return true;
351
6fbe195d
AG
352 default:
353 return false;
354 }
30dc78e1
JH
355}
356
ff9ef578
JH
357void hci_discovery_set_state(struct hci_dev *hdev, int state)
358{
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
7b99b659
AG
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
f963e8e9 368 hdev->discovery.type = 0;
ff9ef578
JH
369 break;
370 case DISCOVERY_STARTING:
371 break;
343f935b 372 case DISCOVERY_FINDING:
ff9ef578
JH
373 mgmt_discovering(hdev, 1);
374 break;
30dc78e1
JH
375 case DISCOVERY_RESOLVING:
376 break;
ff9ef578
JH
377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382}
383
1da177e4
LT
384static void inquiry_cache_flush(struct hci_dev *hdev)
385{
30883512 386 struct discovery_state *cache = &hdev->discovery;
b57c1a56 387 struct inquiry_entry *p, *n;
1da177e4 388
561aafbc
JH
389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
b57c1a56 391 kfree(p);
1da177e4 392 }
561aafbc
JH
393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 396 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400{
30883512 401 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
561aafbc
JH
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
408 return e;
409 }
410
411 return NULL;
412}
413
414struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416{
30883512 417 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 423 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
424 return e;
425 }
426
427 return NULL;
1da177e4
LT
428}
429
30dc78e1
JH
430struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433{
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
a3d4e20a
JH
449void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466}
467
3175405b 468bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
561aafbc 469 bool name_known)
1da177e4 470{
30883512 471 struct discovery_state *cache = &hdev->discovery;
70f23020 472 struct inquiry_entry *ie;
1da177e4
LT
473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
70f23020 476 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a
JH
477 if (ie) {
478 if (ie->name_state == NAME_NEEDED &&
479 data->rssi != ie->data.rssi) {
480 ie->data.rssi = data->rssi;
481 hci_inquiry_cache_update_resolve(hdev, ie);
482 }
483
561aafbc 484 goto update;
a3d4e20a 485 }
561aafbc
JH
486
487 /* Entry not in the cache. Add new one. */
488 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
489 if (!ie)
3175405b 490 return false;
561aafbc
JH
491
492 list_add(&ie->all, &cache->all);
493
494 if (name_known) {
495 ie->name_state = NAME_KNOWN;
496 } else {
497 ie->name_state = NAME_NOT_KNOWN;
498 list_add(&ie->list, &cache->unknown);
499 }
70f23020 500
561aafbc
JH
501update:
502 if (name_known && ie->name_state != NAME_KNOWN &&
503 ie->name_state != NAME_PENDING) {
504 ie->name_state = NAME_KNOWN;
505 list_del(&ie->list);
1da177e4
LT
506 }
507
70f23020
AE
508 memcpy(&ie->data, data, sizeof(*data));
509 ie->timestamp = jiffies;
1da177e4 510 cache->timestamp = jiffies;
3175405b
JH
511
512 if (ie->name_state == NAME_NOT_KNOWN)
513 return false;
514
515 return true;
1da177e4
LT
516}
517
518static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
519{
30883512 520 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
521 struct inquiry_info *info = (struct inquiry_info *) buf;
522 struct inquiry_entry *e;
523 int copied = 0;
524
561aafbc 525 list_for_each_entry(e, &cache->all, all) {
1da177e4 526 struct inquiry_data *data = &e->data;
b57c1a56
JH
527
528 if (copied >= num)
529 break;
530
1da177e4
LT
531 bacpy(&info->bdaddr, &data->bdaddr);
532 info->pscan_rep_mode = data->pscan_rep_mode;
533 info->pscan_period_mode = data->pscan_period_mode;
534 info->pscan_mode = data->pscan_mode;
535 memcpy(info->dev_class, data->dev_class, 3);
536 info->clock_offset = data->clock_offset;
b57c1a56 537
1da177e4 538 info++;
b57c1a56 539 copied++;
1da177e4
LT
540 }
541
542 BT_DBG("cache %p, copied %d", cache, copied);
543 return copied;
544}
545
546static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
547{
548 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
549 struct hci_cp_inquiry cp;
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_INQUIRY, &hdev->flags))
554 return;
555
556 /* Start Inquiry */
557 memcpy(&cp.lap, &ir->lap, 3);
558 cp.length = ir->length;
559 cp.num_rsp = ir->num_rsp;
a9de9248 560 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
561}
562
563int hci_inquiry(void __user *arg)
564{
565 __u8 __user *ptr = arg;
566 struct hci_inquiry_req ir;
567 struct hci_dev *hdev;
568 int err = 0, do_inquiry = 0, max_rsp;
569 long timeo;
570 __u8 *buf;
571
572 if (copy_from_user(&ir, ptr, sizeof(ir)))
573 return -EFAULT;
574
5a08ecce
AE
575 hdev = hci_dev_get(ir.dev_id);
576 if (!hdev)
1da177e4
LT
577 return -ENODEV;
578
09fd0de5 579 hci_dev_lock(hdev);
8e87d142 580 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
581 inquiry_cache_empty(hdev) ||
582 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
583 inquiry_cache_flush(hdev);
584 do_inquiry = 1;
585 }
09fd0de5 586 hci_dev_unlock(hdev);
1da177e4 587
04837f64 588 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
589
590 if (do_inquiry) {
591 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
592 if (err < 0)
593 goto done;
594 }
1da177e4
LT
595
596 /* for unlimited number of responses we will use buffer with 255 entries */
597 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
598
599 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
600 * copy it to the user space.
601 */
01df8c31 602 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 603 if (!buf) {
1da177e4
LT
604 err = -ENOMEM;
605 goto done;
606 }
607
09fd0de5 608 hci_dev_lock(hdev);
1da177e4 609 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 610 hci_dev_unlock(hdev);
1da177e4
LT
611
612 BT_DBG("num_rsp %d", ir.num_rsp);
613
614 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
615 ptr += sizeof(ir);
616 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
617 ir.num_rsp))
618 err = -EFAULT;
8e87d142 619 } else
1da177e4
LT
620 err = -EFAULT;
621
622 kfree(buf);
623
624done:
625 hci_dev_put(hdev);
626 return err;
627}
628
629/* ---- HCI ioctl helpers ---- */
630
631int hci_dev_open(__u16 dev)
632{
633 struct hci_dev *hdev;
634 int ret = 0;
635
5a08ecce
AE
636 hdev = hci_dev_get(dev);
637 if (!hdev)
1da177e4
LT
638 return -ENODEV;
639
640 BT_DBG("%s %p", hdev->name, hdev);
641
642 hci_req_lock(hdev);
643
611b30f7
MH
644 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
645 ret = -ERFKILL;
646 goto done;
647 }
648
1da177e4
LT
649 if (test_bit(HCI_UP, &hdev->flags)) {
650 ret = -EALREADY;
651 goto done;
652 }
653
654 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
655 set_bit(HCI_RAW, &hdev->flags);
656
07e3b94a
AE
657 /* Treat all non BR/EDR controllers as raw devices if
658 enable_hs is not set */
659 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
660 set_bit(HCI_RAW, &hdev->flags);
661
1da177e4
LT
662 if (hdev->open(hdev)) {
663 ret = -EIO;
664 goto done;
665 }
666
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 atomic_set(&hdev->cmd_cnt, 1);
669 set_bit(HCI_INIT, &hdev->flags);
a5040efa 670 hdev->init_last_cmd = 0;
1da177e4 671
04837f64
MH
672 ret = __hci_request(hdev, hci_init_req, 0,
673 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 674
eead27da 675 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
676 ret = __hci_request(hdev, hci_le_init_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
1da177e4
LT
679 clear_bit(HCI_INIT, &hdev->flags);
680 }
681
682 if (!ret) {
683 hci_dev_hold(hdev);
684 set_bit(HCI_UP, &hdev->flags);
685 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 686 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 687 hci_dev_lock(hdev);
744cf19e 688 mgmt_powered(hdev, 1);
09fd0de5 689 hci_dev_unlock(hdev);
56e5cb86 690 }
8e87d142 691 } else {
1da177e4 692 /* Init failed, cleanup */
3eff45ea 693 flush_work(&hdev->tx_work);
c347b765 694 flush_work(&hdev->cmd_work);
b78752cc 695 flush_work(&hdev->rx_work);
1da177e4
LT
696
697 skb_queue_purge(&hdev->cmd_q);
698 skb_queue_purge(&hdev->rx_q);
699
700 if (hdev->flush)
701 hdev->flush(hdev);
702
703 if (hdev->sent_cmd) {
704 kfree_skb(hdev->sent_cmd);
705 hdev->sent_cmd = NULL;
706 }
707
708 hdev->close(hdev);
709 hdev->flags = 0;
710 }
711
712done:
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716}
717
718static int hci_dev_do_close(struct hci_dev *hdev)
719{
720 BT_DBG("%s %p", hdev->name, hdev);
721
28b75a89
AG
722 cancel_work_sync(&hdev->le_scan);
723
1da177e4
LT
724 hci_req_cancel(hdev, ENODEV);
725 hci_req_lock(hdev);
726
727 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 728 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
729 hci_req_unlock(hdev);
730 return 0;
731 }
732
3eff45ea
GP
733 /* Flush RX and TX works */
734 flush_work(&hdev->tx_work);
b78752cc 735 flush_work(&hdev->rx_work);
1da177e4 736
16ab91ab 737 if (hdev->discov_timeout > 0) {
e0f9309f 738 cancel_delayed_work(&hdev->discov_off);
16ab91ab 739 hdev->discov_timeout = 0;
5e5282bb 740 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
741 }
742
a8b2d5c2 743 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
744 cancel_delayed_work(&hdev->service_cache);
745
7ba8b4be
AG
746 cancel_delayed_work_sync(&hdev->le_scan_disable);
747
09fd0de5 748 hci_dev_lock(hdev);
1da177e4
LT
749 inquiry_cache_flush(hdev);
750 hci_conn_hash_flush(hdev);
09fd0de5 751 hci_dev_unlock(hdev);
1da177e4
LT
752
753 hci_notify(hdev, HCI_DEV_DOWN);
754
755 if (hdev->flush)
756 hdev->flush(hdev);
757
758 /* Reset device */
759 skb_queue_purge(&hdev->cmd_q);
760 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
761 if (!test_bit(HCI_RAW, &hdev->flags) &&
762 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 763 set_bit(HCI_INIT, &hdev->flags);
04837f64 764 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 765 msecs_to_jiffies(250));
1da177e4
LT
766 clear_bit(HCI_INIT, &hdev->flags);
767 }
768
c347b765
GP
769 /* flush cmd work */
770 flush_work(&hdev->cmd_work);
1da177e4
LT
771
772 /* Drop queues */
773 skb_queue_purge(&hdev->rx_q);
774 skb_queue_purge(&hdev->cmd_q);
775 skb_queue_purge(&hdev->raw_q);
776
777 /* Drop last sent command */
778 if (hdev->sent_cmd) {
b79f44c1 779 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
780 kfree_skb(hdev->sent_cmd);
781 hdev->sent_cmd = NULL;
782 }
783
784 /* After this point our queues are empty
785 * and no tasks are scheduled. */
786 hdev->close(hdev);
787
8ee56540
MH
788 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
789 hci_dev_lock(hdev);
790 mgmt_powered(hdev, 0);
791 hci_dev_unlock(hdev);
792 }
5add6af8 793
1da177e4
LT
794 /* Clear flags */
795 hdev->flags = 0;
796
797 hci_req_unlock(hdev);
798
799 hci_dev_put(hdev);
800 return 0;
801}
802
803int hci_dev_close(__u16 dev)
804{
805 struct hci_dev *hdev;
806 int err;
807
70f23020
AE
808 hdev = hci_dev_get(dev);
809 if (!hdev)
1da177e4 810 return -ENODEV;
8ee56540
MH
811
812 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
813 cancel_delayed_work(&hdev->power_off);
814
1da177e4 815 err = hci_dev_do_close(hdev);
8ee56540 816
1da177e4
LT
817 hci_dev_put(hdev);
818 return err;
819}
820
821int hci_dev_reset(__u16 dev)
822{
823 struct hci_dev *hdev;
824 int ret = 0;
825
70f23020
AE
826 hdev = hci_dev_get(dev);
827 if (!hdev)
1da177e4
LT
828 return -ENODEV;
829
830 hci_req_lock(hdev);
1da177e4
LT
831
832 if (!test_bit(HCI_UP, &hdev->flags))
833 goto done;
834
835 /* Drop queues */
836 skb_queue_purge(&hdev->rx_q);
837 skb_queue_purge(&hdev->cmd_q);
838
09fd0de5 839 hci_dev_lock(hdev);
1da177e4
LT
840 inquiry_cache_flush(hdev);
841 hci_conn_hash_flush(hdev);
09fd0de5 842 hci_dev_unlock(hdev);
1da177e4
LT
843
844 if (hdev->flush)
845 hdev->flush(hdev);
846
8e87d142 847 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 848 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
849
850 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
851 ret = __hci_request(hdev, hci_reset_req, 0,
852 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
853
854done:
1da177e4
LT
855 hci_req_unlock(hdev);
856 hci_dev_put(hdev);
857 return ret;
858}
859
860int hci_dev_reset_stat(__u16 dev)
861{
862 struct hci_dev *hdev;
863 int ret = 0;
864
70f23020
AE
865 hdev = hci_dev_get(dev);
866 if (!hdev)
1da177e4
LT
867 return -ENODEV;
868
869 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
870
871 hci_dev_put(hdev);
872
873 return ret;
874}
875
876int hci_dev_cmd(unsigned int cmd, void __user *arg)
877{
878 struct hci_dev *hdev;
879 struct hci_dev_req dr;
880 int err = 0;
881
882 if (copy_from_user(&dr, arg, sizeof(dr)))
883 return -EFAULT;
884
70f23020
AE
885 hdev = hci_dev_get(dr.dev_id);
886 if (!hdev)
1da177e4
LT
887 return -ENODEV;
888
889 switch (cmd) {
890 case HCISETAUTH:
04837f64
MH
891 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
892 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
893 break;
894
895 case HCISETENCRYPT:
896 if (!lmp_encrypt_capable(hdev)) {
897 err = -EOPNOTSUPP;
898 break;
899 }
900
901 if (!test_bit(HCI_AUTH, &hdev->flags)) {
902 /* Auth must be enabled first */
04837f64
MH
903 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
904 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
905 if (err)
906 break;
907 }
908
04837f64
MH
909 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
910 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
911 break;
912
913 case HCISETSCAN:
04837f64
MH
914 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
915 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
916 break;
917
1da177e4 918 case HCISETLINKPOL:
e4e8e37c
MH
919 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
920 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
921 break;
922
923 case HCISETLINKMODE:
e4e8e37c
MH
924 hdev->link_mode = ((__u16) dr.dev_opt) &
925 (HCI_LM_MASTER | HCI_LM_ACCEPT);
926 break;
927
928 case HCISETPTYPE:
929 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
930 break;
931
932 case HCISETACLMTU:
e4e8e37c
MH
933 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
934 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
935 break;
936
937 case HCISETSCOMTU:
e4e8e37c
MH
938 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
939 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
940 break;
941
942 default:
943 err = -EINVAL;
944 break;
945 }
e4e8e37c 946
1da177e4
LT
947 hci_dev_put(hdev);
948 return err;
949}
950
951int hci_get_dev_list(void __user *arg)
952{
8035ded4 953 struct hci_dev *hdev;
1da177e4
LT
954 struct hci_dev_list_req *dl;
955 struct hci_dev_req *dr;
1da177e4
LT
956 int n = 0, size, err;
957 __u16 dev_num;
958
959 if (get_user(dev_num, (__u16 __user *) arg))
960 return -EFAULT;
961
962 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
963 return -EINVAL;
964
965 size = sizeof(*dl) + dev_num * sizeof(*dr);
966
70f23020
AE
967 dl = kzalloc(size, GFP_KERNEL);
968 if (!dl)
1da177e4
LT
969 return -ENOMEM;
970
971 dr = dl->dev_req;
972
f20d09d5 973 read_lock(&hci_dev_list_lock);
8035ded4 974 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 975 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 976 cancel_delayed_work(&hdev->power_off);
c542a06c 977
a8b2d5c2
JH
978 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
979 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 980
1da177e4
LT
981 (dr + n)->dev_id = hdev->id;
982 (dr + n)->dev_opt = hdev->flags;
c542a06c 983
1da177e4
LT
984 if (++n >= dev_num)
985 break;
986 }
f20d09d5 987 read_unlock(&hci_dev_list_lock);
1da177e4
LT
988
989 dl->dev_num = n;
990 size = sizeof(*dl) + n * sizeof(*dr);
991
992 err = copy_to_user(arg, dl, size);
993 kfree(dl);
994
995 return err ? -EFAULT : 0;
996}
997
998int hci_get_dev_info(void __user *arg)
999{
1000 struct hci_dev *hdev;
1001 struct hci_dev_info di;
1002 int err = 0;
1003
1004 if (copy_from_user(&di, arg, sizeof(di)))
1005 return -EFAULT;
1006
70f23020
AE
1007 hdev = hci_dev_get(di.dev_id);
1008 if (!hdev)
1da177e4
LT
1009 return -ENODEV;
1010
a8b2d5c2 1011 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1012 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1013
a8b2d5c2
JH
1014 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1015 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1016
1da177e4
LT
1017 strcpy(di.name, hdev->name);
1018 di.bdaddr = hdev->bdaddr;
943da25d 1019 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1020 di.flags = hdev->flags;
1021 di.pkt_type = hdev->pkt_type;
1022 di.acl_mtu = hdev->acl_mtu;
1023 di.acl_pkts = hdev->acl_pkts;
1024 di.sco_mtu = hdev->sco_mtu;
1025 di.sco_pkts = hdev->sco_pkts;
1026 di.link_policy = hdev->link_policy;
1027 di.link_mode = hdev->link_mode;
1028
1029 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1030 memcpy(&di.features, &hdev->features, sizeof(di.features));
1031
1032 if (copy_to_user(arg, &di, sizeof(di)))
1033 err = -EFAULT;
1034
1035 hci_dev_put(hdev);
1036
1037 return err;
1038}
1039
1040/* ---- Interface to HCI drivers ---- */
1041
611b30f7
MH
1042static int hci_rfkill_set_block(void *data, bool blocked)
1043{
1044 struct hci_dev *hdev = data;
1045
1046 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1047
1048 if (!blocked)
1049 return 0;
1050
1051 hci_dev_do_close(hdev);
1052
1053 return 0;
1054}
1055
1056static const struct rfkill_ops hci_rfkill_ops = {
1057 .set_block = hci_rfkill_set_block,
1058};
1059
1da177e4
LT
1060/* Alloc HCI device */
1061struct hci_dev *hci_alloc_dev(void)
1062{
1063 struct hci_dev *hdev;
1064
25ea6db0 1065 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1066 if (!hdev)
1067 return NULL;
1068
0ac7e700 1069 hci_init_sysfs(hdev);
1da177e4
LT
1070 skb_queue_head_init(&hdev->driver_init);
1071
1072 return hdev;
1073}
1074EXPORT_SYMBOL(hci_alloc_dev);
1075
1076/* Free HCI device */
1077void hci_free_dev(struct hci_dev *hdev)
1078{
1079 skb_queue_purge(&hdev->driver_init);
1080
a91f2e39
MH
1081 /* will free via device release */
1082 put_device(&hdev->dev);
1da177e4
LT
1083}
1084EXPORT_SYMBOL(hci_free_dev);
1085
ab81cbf9
JH
1086static void hci_power_on(struct work_struct *work)
1087{
1088 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1089
1090 BT_DBG("%s", hdev->name);
1091
1092 if (hci_dev_open(hdev->id) < 0)
1093 return;
1094
a8b2d5c2 1095 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1096 schedule_delayed_work(&hdev->power_off,
3243553f 1097 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1098
a8b2d5c2 1099 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1100 mgmt_index_added(hdev);
ab81cbf9
JH
1101}
1102
1103static void hci_power_off(struct work_struct *work)
1104{
3243553f
JH
1105 struct hci_dev *hdev = container_of(work, struct hci_dev,
1106 power_off.work);
ab81cbf9
JH
1107
1108 BT_DBG("%s", hdev->name);
1109
8ee56540 1110 hci_dev_do_close(hdev);
ab81cbf9
JH
1111}
1112
16ab91ab
JH
1113static void hci_discov_off(struct work_struct *work)
1114{
1115 struct hci_dev *hdev;
1116 u8 scan = SCAN_PAGE;
1117
1118 hdev = container_of(work, struct hci_dev, discov_off.work);
1119
1120 BT_DBG("%s", hdev->name);
1121
09fd0de5 1122 hci_dev_lock(hdev);
16ab91ab
JH
1123
1124 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1125
1126 hdev->discov_timeout = 0;
1127
09fd0de5 1128 hci_dev_unlock(hdev);
16ab91ab
JH
1129}
1130
2aeb9a1a
JH
1131int hci_uuids_clear(struct hci_dev *hdev)
1132{
1133 struct list_head *p, *n;
1134
1135 list_for_each_safe(p, n, &hdev->uuids) {
1136 struct bt_uuid *uuid;
1137
1138 uuid = list_entry(p, struct bt_uuid, list);
1139
1140 list_del(p);
1141 kfree(uuid);
1142 }
1143
1144 return 0;
1145}
1146
55ed8ca1
JH
1147int hci_link_keys_clear(struct hci_dev *hdev)
1148{
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->link_keys) {
1152 struct link_key *key;
1153
1154 key = list_entry(p, struct link_key, list);
1155
1156 list_del(p);
1157 kfree(key);
1158 }
1159
1160 return 0;
1161}
1162
b899efaf
VCG
1163int hci_smp_ltks_clear(struct hci_dev *hdev)
1164{
1165 struct smp_ltk *k, *tmp;
1166
1167 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1168 list_del(&k->list);
1169 kfree(k);
1170 }
1171
1172 return 0;
1173}
1174
55ed8ca1
JH
1175struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1176{
8035ded4 1177 struct link_key *k;
55ed8ca1 1178
8035ded4 1179 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1180 if (bacmp(bdaddr, &k->bdaddr) == 0)
1181 return k;
55ed8ca1
JH
1182
1183 return NULL;
1184}
1185
d25e28ab
JH
/* Decide whether a newly created link key should be stored
 * persistently. Returns 1 for persistent, 0 for session-only.
 * @conn may be NULL (security mode 3 pairing without a connection). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1221
c9839a11 1222struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1223{
c9839a11 1224 struct smp_ltk *k;
75d262c2 1225
c9839a11
VCG
1226 list_for_each_entry(k, &hdev->long_term_keys, list) {
1227 if (k->ediv != ediv ||
1228 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1229 continue;
1230
c9839a11 1231 return k;
75d262c2
VCG
1232 }
1233
1234 return NULL;
1235}
1236EXPORT_SYMBOL(hci_find_ltk);
1237
c9839a11
VCG
1238struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1239 u8 addr_type)
75d262c2 1240{
c9839a11 1241 struct smp_ltk *k;
75d262c2 1242
c9839a11
VCG
1243 list_for_each_entry(k, &hdev->long_term_keys, list)
1244 if (addr_type == k->bdaddr_type &&
1245 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1246 return k;
1247
1248 return NULL;
1249}
c9839a11 1250EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1251
d25e28ab
JH
/* Store (or update) the BR/EDR link key for @bdaddr.
 * @conn may be NULL; @new_key non-zero means the key came from a fresh
 * pairing and mgmt must be notified. Non-persistent keys are notified
 * and then immediately dropped from the list.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Session-only keys are reported but not kept around */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1306
c9839a11
VCG
/* Store (or update) an SMP key for @bdaddr/@addr_type. Only STK and
 * LTK types are accepted; other types are silently ignored (return 0).
 * When @new_key is set and the key is an LTK, mgmt is notified.
 * Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only full LTKs (not short term keys) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1343
55ed8ca1
JH
1344int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{
1346 struct link_key *key;
1347
1348 key = hci_find_link_key(hdev, bdaddr);
1349 if (!key)
1350 return -ENOENT;
1351
1352 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1353
1354 list_del(&key->list);
1355 kfree(key);
1356
1357 return 0;
1358}
1359
b899efaf
VCG
1360int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361{
1362 struct smp_ltk *k, *tmp;
1363
1364 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1365 if (bacmp(bdaddr, &k->bdaddr))
1366 continue;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&k->list);
1371 kfree(k);
1372 }
1373
1374 return 0;
1375}
1376
6bd32326
VT
/* HCI command timer function */
/* Fires when the controller failed to answer a command in time.
 * Resetting cmd_cnt to 1 unblocks the command queue so the next
 * command can be sent. Runs in timer (softirq) context. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1386
2763eda6
SJ
1387struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1388 bdaddr_t *bdaddr)
1389{
1390 struct oob_data *data;
1391
1392 list_for_each_entry(data, &hdev->remote_oob_data, list)
1393 if (bacmp(bdaddr, &data->bdaddr) == 0)
1394 return data;
1395
1396 return NULL;
1397}
1398
1399int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400{
1401 struct oob_data *data;
1402
1403 data = hci_find_remote_oob_data(hdev, bdaddr);
1404 if (!data)
1405 return -ENOENT;
1406
1407 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1408
1409 list_del(&data->list);
1410 kfree(data);
1411
1412 return 0;
1413}
1414
1415int hci_remote_oob_data_clear(struct hci_dev *hdev)
1416{
1417 struct oob_data *data, *n;
1418
1419 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1420 list_del(&data->list);
1421 kfree(data);
1422 }
1423
1424 return 0;
1425}
1426
/* Store (or refresh) remote OOB pairing data (hash + randomizer) for
 * @bdaddr. Returns 0 on success or -ENOMEM.
 * Note: kmalloc (not kzalloc) is fine here because every field of the
 * new entry is explicitly initialized below. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1450
b2a66aad
AJ
1451struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1452 bdaddr_t *bdaddr)
1453{
8035ded4 1454 struct bdaddr_list *b;
b2a66aad 1455
8035ded4 1456 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1457 if (bacmp(bdaddr, &b->bdaddr) == 0)
1458 return b;
b2a66aad
AJ
1459
1460 return NULL;
1461}
1462
1463int hci_blacklist_clear(struct hci_dev *hdev)
1464{
1465 struct list_head *p, *n;
1466
1467 list_for_each_safe(p, n, &hdev->blacklist) {
1468 struct bdaddr_list *b;
1469
1470 b = list_entry(p, struct bdaddr_list, list);
1471
1472 list_del(p);
1473 kfree(b);
1474 }
1475
1476 return 0;
1477}
1478
88c1fe4b 1479int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1480{
1481 struct bdaddr_list *entry;
b2a66aad
AJ
1482
1483 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1484 return -EBADF;
1485
5e762444
AJ
1486 if (hci_blacklist_lookup(hdev, bdaddr))
1487 return -EEXIST;
b2a66aad
AJ
1488
1489 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1490 if (!entry)
1491 return -ENOMEM;
b2a66aad
AJ
1492
1493 bacpy(&entry->bdaddr, bdaddr);
1494
1495 list_add(&entry->list, &hdev->blacklist);
1496
88c1fe4b 1497 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1498}
1499
88c1fe4b 1500int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1501{
1502 struct bdaddr_list *entry;
b2a66aad 1503
1ec918ce 1504 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1505 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1506
1507 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1508 if (!entry)
5e762444 1509 return -ENOENT;
b2a66aad
AJ
1510
1511 list_del(&entry->list);
1512 kfree(entry);
1513
88c1fe4b 1514 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1515}
1516
db323f2f 1517static void hci_clear_adv_cache(struct work_struct *work)
35815085 1518{
db323f2f
GP
1519 struct hci_dev *hdev = container_of(work, struct hci_dev,
1520 adv_work.work);
35815085
AG
1521
1522 hci_dev_lock(hdev);
1523
1524 hci_adv_entries_clear(hdev);
1525
1526 hci_dev_unlock(hdev);
1527}
1528
76c8686f
AG
1529int hci_adv_entries_clear(struct hci_dev *hdev)
1530{
1531 struct adv_entry *entry, *tmp;
1532
1533 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1534 list_del(&entry->list);
1535 kfree(entry);
1536 }
1537
1538 BT_DBG("%s adv cache cleared", hdev->name);
1539
1540 return 0;
1541}
1542
1543struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1544{
1545 struct adv_entry *entry;
1546
1547 list_for_each_entry(entry, &hdev->adv_entries, list)
1548 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1549 return entry;
1550
1551 return NULL;
1552}
1553
1554static inline int is_connectable_adv(u8 evt_type)
1555{
1556 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1557 return 1;
1558
1559 return 0;
1560}
1561
/* Cache the address from a connectable LE advertising report.
 * Returns -EINVAL for non-connectable event types, 0 if the address
 * is already cached or was added, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1589
7ba8b4be
AG
/* __hci_request callback: send LE Set Scan Parameters with the values
 * packed into @opt (a pointer to struct le_scan_params). */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1602
/* __hci_request callback: send LE Set Scan Enable with enable=1
 * (filter_dup stays 0 from the memset). @opt is unused. */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1612
/* Synchronously start an LE scan: set parameters, enable scanning,
 * then schedule automatic disable after @timeout ms. Both HCI requests
 * run under the request lock with a 3 second completion timeout each.
 * Returns -EINPROGRESS if a scan is already active, or the first
 * failing request's error. Must be callable from process context
 * (blocks in __hci_request). */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scanning stops automatically once the requested duration ends */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1646
/* Delayed-work handler that stops an LE scan: the zeroed command has
 * enable=0, i.e. LE Set Scan Enable (disable). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1659
28b75a89
AG
/* Work handler (hdev->le_scan) that performs the scan requested by
 * hci_le_scan() using the parameters stashed in hdev->le_scan_params.
 * Runs on system_long_wq because hci_do_le_scan blocks. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
1670
/* Asynchronously request an LE scan. Stores the parameters on the
 * device and queues le_scan_work; returns -EINPROGRESS if a scan
 * request is already queued or running. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
								int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the work blocks for up to two request timeouts */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1690
1da177e4
LT
/* Register HCI device */
/* Allocate the lowest free index (AMP devices start at 1 so index 0
 * can double as the AMP controller ID), initialize all per-device
 * state, create the workqueue/sysfs/rfkill resources and kick off the
 * initial power-on. Returns the new device id or a negative errno.
 * NOTE: initialization order matters — all lists, work items and
 * timers must be ready before the device becomes visible. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue: HCI processing must not
	 * be starved and relies on ordered execution */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal — the device just lacks a kill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1822
1823/* Unregister HCI device */
/* Unregister HCI device: remove it from the global list, close it,
 * free reassembly buffers, tell mgmt, tear down rfkill/sysfs/workqueue
 * and purge all per-device storage. Teardown order mirrors (reverse
 * of) hci_register_dev and must not be reordered casually. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv-cache work is not running before freeing state */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drops the reference taken in hci_register_dev; the device is
	 * freed via its release callback once the last ref is gone */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1875
1876/* Suspend HCI device */
1877int hci_suspend_dev(struct hci_dev *hdev)
1878{
1879 hci_notify(hdev, HCI_DEV_SUSPEND);
1880 return 0;
1881}
1882EXPORT_SYMBOL(hci_suspend_dev);
1883
/* Resume HCI device */
/* Notify registered listeners that the device is resuming. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1891
76bca880
MH
/* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete HCI packet. Takes
 * ownership of @skb (frees it on error). The device must be up or in
 * init; otherwise -ENXIO. Processing happens later in rx_work. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1914
33e882a5 1915static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1916 int count, __u8 index)
33e882a5
SS
1917{
1918 int len = 0;
1919 int hlen = 0;
1920 int remain = count;
1921 struct sk_buff *skb;
1922 struct bt_skb_cb *scb;
1923
1924 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1925 index >= NUM_REASSEMBLY)
1926 return -EILSEQ;
1927
1928 skb = hdev->reassembly[index];
1929
1930 if (!skb) {
1931 switch (type) {
1932 case HCI_ACLDATA_PKT:
1933 len = HCI_MAX_FRAME_SIZE;
1934 hlen = HCI_ACL_HDR_SIZE;
1935 break;
1936 case HCI_EVENT_PKT:
1937 len = HCI_MAX_EVENT_SIZE;
1938 hlen = HCI_EVENT_HDR_SIZE;
1939 break;
1940 case HCI_SCODATA_PKT:
1941 len = HCI_MAX_SCO_SIZE;
1942 hlen = HCI_SCO_HDR_SIZE;
1943 break;
1944 }
1945
1e429f38 1946 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1947 if (!skb)
1948 return -ENOMEM;
1949
1950 scb = (void *) skb->cb;
1951 scb->expect = hlen;
1952 scb->pkt_type = type;
1953
1954 skb->dev = (void *) hdev;
1955 hdev->reassembly[index] = skb;
1956 }
1957
1958 while (count) {
1959 scb = (void *) skb->cb;
70c1f20b 1960 len = min_t(__u16, scb->expect, count);
33e882a5
SS
1961
1962 memcpy(skb_put(skb, len), data, len);
1963
1964 count -= len;
1965 data += len;
1966 scb->expect -= len;
1967 remain = count;
1968
1969 switch (type) {
1970 case HCI_EVENT_PKT:
1971 if (skb->len == HCI_EVENT_HDR_SIZE) {
1972 struct hci_event_hdr *h = hci_event_hdr(skb);
1973 scb->expect = h->plen;
1974
1975 if (skb_tailroom(skb) < scb->expect) {
1976 kfree_skb(skb);
1977 hdev->reassembly[index] = NULL;
1978 return -ENOMEM;
1979 }
1980 }
1981 break;
1982
1983 case HCI_ACLDATA_PKT:
1984 if (skb->len == HCI_ACL_HDR_SIZE) {
1985 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1986 scb->expect = __le16_to_cpu(h->dlen);
1987
1988 if (skb_tailroom(skb) < scb->expect) {
1989 kfree_skb(skb);
1990 hdev->reassembly[index] = NULL;
1991 return -ENOMEM;
1992 }
1993 }
1994 break;
1995
1996 case HCI_SCODATA_PKT:
1997 if (skb->len == HCI_SCO_HDR_SIZE) {
1998 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1999 scb->expect = h->dlen;
2000
2001 if (skb_tailroom(skb) < scb->expect) {
2002 kfree_skb(skb);
2003 hdev->reassembly[index] = NULL;
2004 return -ENOMEM;
2005 }
2006 }
2007 break;
2008 }
2009
2010 if (scb->expect == 0) {
2011 /* Complete frame */
2012
2013 bt_cb(skb)->pkt_type = type;
2014 hci_recv_frame(skb);
2015
2016 hdev->reassembly[index] = NULL;
2017 return remain;
2018 }
2019 }
2020
2021 return remain;
2022}
2023
ef222013
MH
/* Feed a driver-supplied fragment of a typed HCI packet into the
 * reassembler, looping until all @count bytes are consumed. Uses
 * reassembly slot (type - 1). Returns the last reassembly result
 * (bytes left, normally 0) or a negative errno. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2043
99811510
SS
/* Stream-mode drivers (e.g. UART) share a single reassembly slot */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes where each packet is prefixed with a one-byte
 * packet-type indicator. The type byte is consumed here when a new
 * packet starts; subsequent bytes go through hci_reassembly() using
 * the shared STREAM_REASSEMBLY slot. Returns bytes left or errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2078
1da177e4
LT
2079/* ---- Interface to upper protocols ---- */
2080
1da177e4
LT
2081int hci_register_cb(struct hci_cb *cb)
2082{
2083 BT_DBG("%p name %s", cb, cb->name);
2084
f20d09d5 2085 write_lock(&hci_cb_list_lock);
1da177e4 2086 list_add(&cb->list, &hci_cb_list);
f20d09d5 2087 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2088
2089 return 0;
2090}
2091EXPORT_SYMBOL(hci_register_cb);
2092
2093int hci_unregister_cb(struct hci_cb *cb)
2094{
2095 BT_DBG("%p name %s", cb, cb->name);
2096
f20d09d5 2097 write_lock(&hci_cb_list_lock);
1da177e4 2098 list_del(&cb->list);
f20d09d5 2099 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2100
2101 return 0;
2102}
2103EXPORT_SYMBOL(hci_unregister_cb);
2104
/* Hand a fully formed packet to the driver. Timestamps the skb and
 * duplicates it to the monitor channel and (in promiscuous mode) raw
 * sockets before transmission. Consumes @skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2132
/* Send HCI command */
/* Build an HCI command packet (@opcode, optional @param of @plen
 * bytes) and queue it on cmd_q for cmd_work to transmit.
 * Returns 0 or -ENOMEM. During init, records the opcode so the init
 * sequence can track the last command it issued. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2168
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, else NULL. The pointer aliases
 * hdev->sent_cmd and is only valid until the next command is sent. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2186
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary/broadcast flags and
 * current payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2199
73d80deb
LAD
2200static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2201 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2202{
2203 struct hci_dev *hdev = conn->hdev;
2204 struct sk_buff *list;
2205
70f23020
AE
2206 list = skb_shinfo(skb)->frag_list;
2207 if (!list) {
1da177e4
LT
2208 /* Non fragmented */
2209 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2210
73d80deb 2211 skb_queue_tail(queue, skb);
1da177e4
LT
2212 } else {
2213 /* Fragmented */
2214 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2215
2216 skb_shinfo(skb)->frag_list = NULL;
2217
2218 /* Queue all fragments atomically */
af3e6359 2219 spin_lock(&queue->lock);
1da177e4 2220
73d80deb 2221 __skb_queue_tail(queue, skb);
e702112f
AE
2222
2223 flags &= ~ACL_START;
2224 flags |= ACL_CONT;
1da177e4
LT
2225 do {
2226 skb = list; list = list->next;
8e87d142 2227
1da177e4 2228 skb->dev = (void *) hdev;
0d48d939 2229 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2230 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2231
2232 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2233
73d80deb 2234 __skb_queue_tail(queue, skb);
1da177e4
LT
2235 } while (list);
2236
af3e6359 2237 spin_unlock(&queue->lock);
1da177e4 2238 }
73d80deb
LAD
2239}
2240
/* Send ACL data on @chan: tag the skb, add the ACL header, queue it
 * on the channel's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2257
/* Send SCO data */
/* Prepend a SCO header (handle + length), queue the skb on the
 * connection's data queue and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2280
2281/* ---- HCI TX task (outgoing data) ---- */
2282
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling), and compute its quote: the
 * number of packets it may send this round, derived from the free
 * controller buffer count divided by the number of ready connections.
 * Returns NULL with *quote = 0 when nothing is ready. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer count depends on the link type; LE falls
		 * back to the ACL pool when no dedicated LE buffers exist */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2342
/* Link TX timeout handler: the controller stopped acknowledging packets
 * for longer than the supervision timeout.  Disconnect every connection
 * of the given link @type that still has unacked packets (c->sent != 0),
 * using reason 0x13 (Remote User Terminated Connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2363
/* Channel-level scheduler: pick the best HCI channel of link @type to
 * transmit from, and compute its TX quota.
 *
 * Selection is strict-priority first, fairness second: among all
 * channels with queued data, only those whose head skb carries the
 * highest priority seen (cur_prio) compete, and of those the one whose
 * owning connection has the fewest in-flight packets wins.  Discovering
 * a higher priority resets the fairness bookkeeping (num/min).
 *
 * Returns the chosen channel or NULL; *@quote gets the buffer credits
 * divided among the @num competing channels, minimum 1.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters here */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority: restart fairness
				 * accounting among its channels */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness by the owning connection's unacked count */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Same credit-pool selection as hci_low_sent */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2442
/* Anti-starvation pass, run after a TX round that consumed credits.
 *
 * For every channel of link @type: a channel that transmitted this
 * round (chan->sent != 0) just has its counter cleared; a channel that
 * did NOT get to send has its head skb promoted to HCI_PRIO_MAX - 1 so
 * the strict-priority selection in hci_chan_sent cannot starve it
 * indefinitely.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2492
/* Number of controller buffer blocks consumed by one ACL frame.
 * Only the payload (skb->len minus the ACL header) counts against the
 * block budget; the result is rounded up to whole blocks. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2498
/* Detect a stalled ACL link: if no TX credits remain (@cnt == 0) and
 * nothing has been acked since acl_last_tx for longer than
 * HCI_ACL_TX_TIMEOUT, tear down stalled ACL connections.  Skipped in
 * HCI_RAW mode where the stack does not manage flow control. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 2509
/* ACL scheduler for packet-based flow control: one buffer credit per
 * frame.  Repeatedly picks the best channel (hci_chan_sent) and drains
 * up to its quota, stopping early if a lower-priority skb reaches the
 * head of the queue.  Runs the anti-starvation pass if any credit was
 * consumed. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked above; actually remove it now */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were used: rebalance priorities to avoid starvation */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2547
b71d385a
AE
2548static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2549{
63d2bc1b 2550 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2551 struct hci_chan *chan;
2552 struct sk_buff *skb;
2553 int quote;
b71d385a 2554
63d2bc1b 2555 __check_timeout(hdev, cnt);
b71d385a
AE
2556
2557 while (hdev->block_cnt > 0 &&
2558 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2559 u32 priority = (skb_peek(&chan->data_q))->priority;
2560 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2561 int blocks;
2562
2563 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2564 skb->len, skb->priority);
2565
2566 /* Stop if priority has changed */
2567 if (skb->priority < priority)
2568 break;
2569
2570 skb = skb_dequeue(&chan->data_q);
2571
2572 blocks = __get_blocks(hdev, skb);
2573 if (blocks > hdev->block_cnt)
2574 return;
2575
2576 hci_conn_enter_active_mode(chan->conn,
2577 bt_cb(skb)->force_active);
2578
2579 hci_send_frame(skb);
2580 hdev->acl_last_tx = jiffies;
2581
2582 hdev->block_cnt -= blocks;
2583 quote -= blocks;
2584
2585 chan->sent += blocks;
2586 chan->conn->sent += blocks;
2587 }
2588 }
2589
2590 if (cnt != hdev->block_cnt)
2591 hci_prio_recalculate(hdev, ACL_LINK);
2592}
2593
2594static inline void hci_sched_acl(struct hci_dev *hdev)
2595{
2596 BT_DBG("%s", hdev->name);
2597
2598 if (!hci_conn_num(hdev, ACL_LINK))
2599 return;
2600
2601 switch (hdev->flow_ctl_mode) {
2602 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2603 hci_sched_acl_pkt(hdev);
2604 break;
2605
2606 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2607 hci_sched_acl_blk(hdev);
2608 break;
2609 }
2610}
2611
1da177e4
LT
2612/* Schedule SCO */
2613static inline void hci_sched_sco(struct hci_dev *hdev)
2614{
2615 struct hci_conn *conn;
2616 struct sk_buff *skb;
2617 int quote;
2618
2619 BT_DBG("%s", hdev->name);
2620
52087a79
LAD
2621 if (!hci_conn_num(hdev, SCO_LINK))
2622 return;
2623
1da177e4
LT
2624 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2625 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2626 BT_DBG("skb %p len %d", skb, skb->len);
2627 hci_send_frame(skb);
2628
2629 conn->sent++;
2630 if (conn->sent == ~0)
2631 conn->sent = 0;
2632 }
2633 }
2634}
2635
b6a0dc82
MH
2636static inline void hci_sched_esco(struct hci_dev *hdev)
2637{
2638 struct hci_conn *conn;
2639 struct sk_buff *skb;
2640 int quote;
2641
2642 BT_DBG("%s", hdev->name);
2643
52087a79
LAD
2644 if (!hci_conn_num(hdev, ESCO_LINK))
2645 return;
2646
b6a0dc82
MH
2647 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2648 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2649 BT_DBG("skb %p len %d", skb, skb->len);
2650 hci_send_frame(skb);
2651
2652 conn->sent++;
2653 if (conn->sent == ~0)
2654 conn->sent = 0;
2655 }
2656 }
2657}
2658
/* LE scheduler.  Works like hci_sched_acl_pkt but draws credits from
 * the LE pool when the controller has dedicated LE buffers (le_pkts),
 * otherwise shares the ACL pool.  Writes the remaining count back into
 * whichever pool it borrowed from. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE credit pool if present, else share ACL's */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked above; actually remove it now */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the leftover credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Credits were used: rebalance priorities to avoid starvation */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2709
/* TX worker: runs every link-type scheduler in fixed order (ACL, SCO,
 * eSCO, LE), then flushes the raw queue unconditionally.  Queued from
 * hci_send_acl/hci_send_sco and from credit-return events. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2732
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Deliver an incoming ACL data frame: strip the ACL header, split the
 * raw handle field into connection handle + PB/BC flags, look up the
 * connection and pass the payload to L2CAP (which takes ownership of
 * the skb).  Frames for unknown handles are logged and freed. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2769
2770/* SCO data packet */
2771static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2772{
2773 struct hci_sco_hdr *hdr = (void *) skb->data;
2774 struct hci_conn *conn;
2775 __u16 handle;
2776
2777 skb_pull(skb, HCI_SCO_HDR_SIZE);
2778
2779 handle = __le16_to_cpu(hdr->handle);
2780
2781 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2782
2783 hdev->stat.sco_rx++;
2784
2785 hci_dev_lock(hdev);
2786 conn = hci_conn_hash_lookup_handle(hdev, handle);
2787 hci_dev_unlock(hdev);
2788
2789 if (conn) {
1da177e4 2790 /* Send to upper protocol */
686ebf28
UF
2791 sco_recv_scodata(conn, skb);
2792 return;
1da177e4 2793 } else {
8e87d142 2794 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2795 hdev->name, handle);
2796 }
2797
2798 kfree_skb(skb);
2799}
2800
/* RX worker: drains the device's receive queue.  Each frame is copied
 * to the monitor and (in promiscuous mode) to raw sockets, then either
 * dropped (HCI_RAW mode, or data packets during HCI_INIT) or dispatched
 * by packet type to the event/ACL/SCO handlers, which take ownership of
 * the skb. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace owns the device, the stack only taps */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2855
/* Command worker: sends the next queued HCI command when the controller
 * has a free command slot (cmd_cnt).  A clone of the frame is kept in
 * sent_cmd so the completion handler can match the response; the command
 * timeout timer is (re)armed unless a reset is in flight.  On clone
 * failure the command is requeued and the worker rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset no response is expected; otherwise
			 * arm the command timeout */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2886
2887int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2888{
2889 /* General inquiry access code (GIAC) */
2890 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2891 struct hci_cp_inquiry cp;
2892
2893 BT_DBG("%s", hdev->name);
2894
2895 if (test_bit(HCI_INQUIRY, &hdev->flags))
2896 return -EINPROGRESS;
2897
4663262c
JH
2898 inquiry_cache_flush(hdev);
2899
2519a1fc
AG
2900 memset(&cp, 0, sizeof(cp));
2901 memcpy(&cp.lap, lap, sizeof(cp.lap));
2902 cp.length = length;
2903
2904 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2905}
023d5049
AG
2906
2907int hci_cancel_inquiry(struct hci_dev *hdev)
2908{
2909 BT_DBG("%s", hdev->name);
2910
2911 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2912 return -EPERM;
2913
2914 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2915}