Bluetooth: Fix __hci_req_sync() handling of empty requests
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
23bb5763 60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 61{
f0e09510 62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
23bb5763 63
a5040efa
JH
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
75fb0e32
JH
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 69 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
1036b890 79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
23bb5763 88 return;
75fb0e32 89 }
1da177e4
LT
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
01178cd4
JH
110static int __hci_req_sync(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
1da177e4
LT
113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
53cce22d
JH
125
126 /* If the request didn't send any commands return immediately */
127 if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
128 hdev->req_status = 0;
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130 return err;
131 }
132
1da177e4
LT
133 schedule_timeout(timeout);
134
135 remove_wait_queue(&hdev->req_wait_q, &wait);
136
137 if (signal_pending(current))
138 return -EINTR;
139
140 switch (hdev->req_status) {
141 case HCI_REQ_DONE:
e175072f 142 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
143 break;
144
145 case HCI_REQ_CANCELED:
146 err = -hdev->req_result;
147 break;
148
149 default:
150 err = -ETIMEDOUT;
151 break;
3ff50b79 152 }
1da177e4 153
a5040efa 154 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
155
156 BT_DBG("%s end: err %d", hdev->name, err);
157
158 return err;
159}
160
01178cd4
JH
161static int hci_req_sync(struct hci_dev *hdev,
162 void (*req)(struct hci_dev *hdev, unsigned long opt),
163 unsigned long opt, __u32 timeout)
1da177e4
LT
164{
165 int ret;
166
7c6a329e
MH
167 if (!test_bit(HCI_UP, &hdev->flags))
168 return -ENETDOWN;
169
1da177e4
LT
170 /* Serialize all requests */
171 hci_req_lock(hdev);
01178cd4 172 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
173 hci_req_unlock(hdev);
174
175 return ret;
176}
177
178static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
179{
180 BT_DBG("%s %ld", hdev->name, opt);
181
182 /* Reset device */
f630cf0d 183 set_bit(HCI_RESET, &hdev->flags);
a9de9248 184 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
185}
186
e61ef499 187static void bredr_init(struct hci_dev *hdev)
1da177e4 188{
2455a3ea
AE
189 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
190
1da177e4 191 /* Read Local Supported Features */
a9de9248 192 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 193
1143e5a6 194 /* Read Local Version */
a9de9248 195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1da177e4
LT
196}
197
e61ef499
AE
198static void amp_init(struct hci_dev *hdev)
199{
2455a3ea
AE
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
201
e61ef499
AE
202 /* Read Local Version */
203 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
204
205 /* Read Local AMP Info */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
207
208 /* Read Data Blk size */
209 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
210}
211
212static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
213{
214 struct sk_buff *skb;
215
216 BT_DBG("%s %ld", hdev->name, opt);
217
218 /* Driver initialization */
219
220 /* Special commands */
221 while ((skb = skb_dequeue(&hdev->driver_init))) {
222 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
223 skb->dev = (void *) hdev;
224
225 skb_queue_tail(&hdev->cmd_q, skb);
226 queue_work(hdev->workqueue, &hdev->cmd_work);
227 }
228 skb_queue_purge(&hdev->driver_init);
229
11778716
AE
230 /* Reset */
231 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
232 hci_reset_req(hdev, 0);
233
e61ef499
AE
234 switch (hdev->dev_type) {
235 case HCI_BREDR:
236 bredr_init(hdev);
237 break;
238
239 case HCI_AMP:
240 amp_init(hdev);
241 break;
242
243 default:
244 BT_ERR("Unknown device type %d", hdev->dev_type);
245 break;
246 }
e61ef499
AE
247}
248
1da177e4
LT
249static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
250{
251 __u8 scan = opt;
252
253 BT_DBG("%s %x", hdev->name, scan);
254
255 /* Inquiry and Page scans */
a9de9248 256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
257}
258
259static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
260{
261 __u8 auth = opt;
262
263 BT_DBG("%s %x", hdev->name, auth);
264
265 /* Authentication */
a9de9248 266 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
267}
268
269static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
270{
271 __u8 encrypt = opt;
272
273 BT_DBG("%s %x", hdev->name, encrypt);
274
e4e8e37c 275 /* Encryption */
a9de9248 276 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
277}
278
e4e8e37c
MH
279static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __le16 policy = cpu_to_le16(opt);
282
a418b893 283 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
284
285 /* Default link policy */
286 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
287}
288
8e87d142 289/* Get HCI device by index.
1da177e4
LT
290 * Device is held on return. */
291struct hci_dev *hci_dev_get(int index)
292{
8035ded4 293 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
294
295 BT_DBG("%d", index);
296
297 if (index < 0)
298 return NULL;
299
300 read_lock(&hci_dev_list_lock);
8035ded4 301 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
302 if (d->id == index) {
303 hdev = hci_dev_hold(d);
304 break;
305 }
306 }
307 read_unlock(&hci_dev_list_lock);
308 return hdev;
309}
1da177e4
LT
310
311/* ---- Inquiry support ---- */
ff9ef578 312
30dc78e1
JH
313bool hci_discovery_active(struct hci_dev *hdev)
314{
315 struct discovery_state *discov = &hdev->discovery;
316
6fbe195d 317 switch (discov->state) {
343f935b 318 case DISCOVERY_FINDING:
6fbe195d 319 case DISCOVERY_RESOLVING:
30dc78e1
JH
320 return true;
321
6fbe195d
AG
322 default:
323 return false;
324 }
30dc78e1
JH
325}
326
ff9ef578
JH
327void hci_discovery_set_state(struct hci_dev *hdev, int state)
328{
329 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
330
331 if (hdev->discovery.state == state)
332 return;
333
334 switch (state) {
335 case DISCOVERY_STOPPED:
7b99b659
AG
336 if (hdev->discovery.state != DISCOVERY_STARTING)
337 mgmt_discovering(hdev, 0);
ff9ef578
JH
338 break;
339 case DISCOVERY_STARTING:
340 break;
343f935b 341 case DISCOVERY_FINDING:
ff9ef578
JH
342 mgmt_discovering(hdev, 1);
343 break;
30dc78e1
JH
344 case DISCOVERY_RESOLVING:
345 break;
ff9ef578
JH
346 case DISCOVERY_STOPPING:
347 break;
348 }
349
350 hdev->discovery.state = state;
351}
352
1da177e4
LT
353static void inquiry_cache_flush(struct hci_dev *hdev)
354{
30883512 355 struct discovery_state *cache = &hdev->discovery;
b57c1a56 356 struct inquiry_entry *p, *n;
1da177e4 357
561aafbc
JH
358 list_for_each_entry_safe(p, n, &cache->all, all) {
359 list_del(&p->all);
b57c1a56 360 kfree(p);
1da177e4 361 }
561aafbc
JH
362
363 INIT_LIST_HEAD(&cache->unknown);
364 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
365}
366
a8c5fb1a
GP
367struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
368 bdaddr_t *bdaddr)
1da177e4 369{
30883512 370 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
371 struct inquiry_entry *e;
372
6ed93dc6 373 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 374
561aafbc
JH
375 list_for_each_entry(e, &cache->all, all) {
376 if (!bacmp(&e->data.bdaddr, bdaddr))
377 return e;
378 }
379
380 return NULL;
381}
382
383struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 384 bdaddr_t *bdaddr)
561aafbc 385{
30883512 386 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
387 struct inquiry_entry *e;
388
6ed93dc6 389 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
390
391 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 392 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
393 return e;
394 }
395
396 return NULL;
1da177e4
LT
397}
398
30dc78e1 399struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
400 bdaddr_t *bdaddr,
401 int state)
30dc78e1
JH
402{
403 struct discovery_state *cache = &hdev->discovery;
404 struct inquiry_entry *e;
405
6ed93dc6 406 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
407
408 list_for_each_entry(e, &cache->resolve, list) {
409 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
410 return e;
411 if (!bacmp(&e->data.bdaddr, bdaddr))
412 return e;
413 }
414
415 return NULL;
416}
417
a3d4e20a 418void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 419 struct inquiry_entry *ie)
a3d4e20a
JH
420{
421 struct discovery_state *cache = &hdev->discovery;
422 struct list_head *pos = &cache->resolve;
423 struct inquiry_entry *p;
424
425 list_del(&ie->list);
426
427 list_for_each_entry(p, &cache->resolve, list) {
428 if (p->name_state != NAME_PENDING &&
a8c5fb1a 429 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
430 break;
431 pos = &p->list;
432 }
433
434 list_add(&ie->list, pos);
435}
436
3175405b 437bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 438 bool name_known, bool *ssp)
1da177e4 439{
30883512 440 struct discovery_state *cache = &hdev->discovery;
70f23020 441 struct inquiry_entry *ie;
1da177e4 442
6ed93dc6 443 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 444
2b2fec4d
SJ
445 hci_remove_remote_oob_data(hdev, &data->bdaddr);
446
388fc8fa
JH
447 if (ssp)
448 *ssp = data->ssp_mode;
449
70f23020 450 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 451 if (ie) {
388fc8fa
JH
452 if (ie->data.ssp_mode && ssp)
453 *ssp = true;
454
a3d4e20a 455 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 456 data->rssi != ie->data.rssi) {
a3d4e20a
JH
457 ie->data.rssi = data->rssi;
458 hci_inquiry_cache_update_resolve(hdev, ie);
459 }
460
561aafbc 461 goto update;
a3d4e20a 462 }
561aafbc
JH
463
464 /* Entry not in the cache. Add new one. */
465 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
466 if (!ie)
3175405b 467 return false;
561aafbc
JH
468
469 list_add(&ie->all, &cache->all);
470
471 if (name_known) {
472 ie->name_state = NAME_KNOWN;
473 } else {
474 ie->name_state = NAME_NOT_KNOWN;
475 list_add(&ie->list, &cache->unknown);
476 }
70f23020 477
561aafbc
JH
478update:
479 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 480 ie->name_state != NAME_PENDING) {
561aafbc
JH
481 ie->name_state = NAME_KNOWN;
482 list_del(&ie->list);
1da177e4
LT
483 }
484
70f23020
AE
485 memcpy(&ie->data, data, sizeof(*data));
486 ie->timestamp = jiffies;
1da177e4 487 cache->timestamp = jiffies;
3175405b
JH
488
489 if (ie->name_state == NAME_NOT_KNOWN)
490 return false;
491
492 return true;
1da177e4
LT
493}
494
495static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
496{
30883512 497 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
498 struct inquiry_info *info = (struct inquiry_info *) buf;
499 struct inquiry_entry *e;
500 int copied = 0;
501
561aafbc 502 list_for_each_entry(e, &cache->all, all) {
1da177e4 503 struct inquiry_data *data = &e->data;
b57c1a56
JH
504
505 if (copied >= num)
506 break;
507
1da177e4
LT
508 bacpy(&info->bdaddr, &data->bdaddr);
509 info->pscan_rep_mode = data->pscan_rep_mode;
510 info->pscan_period_mode = data->pscan_period_mode;
511 info->pscan_mode = data->pscan_mode;
512 memcpy(info->dev_class, data->dev_class, 3);
513 info->clock_offset = data->clock_offset;
b57c1a56 514
1da177e4 515 info++;
b57c1a56 516 copied++;
1da177e4
LT
517 }
518
519 BT_DBG("cache %p, copied %d", cache, copied);
520 return copied;
521}
522
523static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
524{
525 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
526 struct hci_cp_inquiry cp;
527
528 BT_DBG("%s", hdev->name);
529
530 if (test_bit(HCI_INQUIRY, &hdev->flags))
531 return;
532
533 /* Start Inquiry */
534 memcpy(&cp.lap, &ir->lap, 3);
535 cp.length = ir->length;
536 cp.num_rsp = ir->num_rsp;
a9de9248 537 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
538}
539
540int hci_inquiry(void __user *arg)
541{
542 __u8 __user *ptr = arg;
543 struct hci_inquiry_req ir;
544 struct hci_dev *hdev;
545 int err = 0, do_inquiry = 0, max_rsp;
546 long timeo;
547 __u8 *buf;
548
549 if (copy_from_user(&ir, ptr, sizeof(ir)))
550 return -EFAULT;
551
5a08ecce
AE
552 hdev = hci_dev_get(ir.dev_id);
553 if (!hdev)
1da177e4
LT
554 return -ENODEV;
555
09fd0de5 556 hci_dev_lock(hdev);
8e87d142 557 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 558 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
559 inquiry_cache_flush(hdev);
560 do_inquiry = 1;
561 }
09fd0de5 562 hci_dev_unlock(hdev);
1da177e4 563
04837f64 564 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
565
566 if (do_inquiry) {
01178cd4
JH
567 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
568 timeo);
70f23020
AE
569 if (err < 0)
570 goto done;
571 }
1da177e4 572
8fc9ced3
GP
573 /* for unlimited number of responses we will use buffer with
574 * 255 entries
575 */
1da177e4
LT
576 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
577
578 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
579 * copy it to the user space.
580 */
01df8c31 581 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 582 if (!buf) {
1da177e4
LT
583 err = -ENOMEM;
584 goto done;
585 }
586
09fd0de5 587 hci_dev_lock(hdev);
1da177e4 588 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 589 hci_dev_unlock(hdev);
1da177e4
LT
590
591 BT_DBG("num_rsp %d", ir.num_rsp);
592
593 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
594 ptr += sizeof(ir);
595 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 596 ir.num_rsp))
1da177e4 597 err = -EFAULT;
8e87d142 598 } else
1da177e4
LT
599 err = -EFAULT;
600
601 kfree(buf);
602
603done:
604 hci_dev_put(hdev);
605 return err;
606}
607
3f0f524b
JH
608static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
609{
610 u8 ad_len = 0, flags = 0;
611 size_t name_len;
612
613 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
614 flags |= LE_AD_GENERAL;
615
616 if (!lmp_bredr_capable(hdev))
617 flags |= LE_AD_NO_BREDR;
618
619 if (lmp_le_br_capable(hdev))
620 flags |= LE_AD_SIM_LE_BREDR_CTRL;
621
622 if (lmp_host_le_br_capable(hdev))
623 flags |= LE_AD_SIM_LE_BREDR_HOST;
624
625 if (flags) {
626 BT_DBG("adv flags 0x%02x", flags);
627
628 ptr[0] = 2;
629 ptr[1] = EIR_FLAGS;
630 ptr[2] = flags;
631
632 ad_len += 3;
633 ptr += 3;
634 }
635
636 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
637 ptr[0] = 2;
638 ptr[1] = EIR_TX_POWER;
639 ptr[2] = (u8) hdev->adv_tx_power;
640
641 ad_len += 3;
642 ptr += 3;
643 }
644
645 name_len = strlen(hdev->dev_name);
646 if (name_len > 0) {
647 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
648
649 if (name_len > max_len) {
650 name_len = max_len;
651 ptr[1] = EIR_NAME_SHORT;
652 } else
653 ptr[1] = EIR_NAME_COMPLETE;
654
655 ptr[0] = name_len + 1;
656
657 memcpy(ptr + 2, hdev->dev_name, name_len);
658
659 ad_len += (name_len + 2);
660 ptr += (name_len + 2);
661 }
662
663 return ad_len;
664}
665
666int hci_update_ad(struct hci_dev *hdev)
667{
668 struct hci_cp_le_set_adv_data cp;
669 u8 len;
670 int err;
671
672 hci_dev_lock(hdev);
673
674 if (!lmp_le_capable(hdev)) {
675 err = -EINVAL;
676 goto unlock;
677 }
678
679 memset(&cp, 0, sizeof(cp));
680
681 len = create_ad(hdev, cp.data);
682
683 if (hdev->adv_data_len == len &&
684 memcmp(cp.data, hdev->adv_data, len) == 0) {
685 err = 0;
686 goto unlock;
687 }
688
689 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
690 hdev->adv_data_len = len;
691
692 cp.length = len;
693 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
694
695unlock:
696 hci_dev_unlock(hdev);
697
698 return err;
699}
700
1da177e4
LT
701/* ---- HCI ioctl helpers ---- */
702
703int hci_dev_open(__u16 dev)
704{
705 struct hci_dev *hdev;
706 int ret = 0;
707
5a08ecce
AE
708 hdev = hci_dev_get(dev);
709 if (!hdev)
1da177e4
LT
710 return -ENODEV;
711
712 BT_DBG("%s %p", hdev->name, hdev);
713
714 hci_req_lock(hdev);
715
94324962
JH
716 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
717 ret = -ENODEV;
718 goto done;
719 }
720
611b30f7
MH
721 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
722 ret = -ERFKILL;
723 goto done;
724 }
725
1da177e4
LT
726 if (test_bit(HCI_UP, &hdev->flags)) {
727 ret = -EALREADY;
728 goto done;
729 }
730
731 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
732 set_bit(HCI_RAW, &hdev->flags);
733
07e3b94a
AE
734 /* Treat all non BR/EDR controllers as raw devices if
735 enable_hs is not set */
736 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
737 set_bit(HCI_RAW, &hdev->flags);
738
1da177e4
LT
739 if (hdev->open(hdev)) {
740 ret = -EIO;
741 goto done;
742 }
743
744 if (!test_bit(HCI_RAW, &hdev->flags)) {
745 atomic_set(&hdev->cmd_cnt, 1);
746 set_bit(HCI_INIT, &hdev->flags);
a5040efa 747 hdev->init_last_cmd = 0;
1da177e4 748
01178cd4 749 ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
750
751 clear_bit(HCI_INIT, &hdev->flags);
752 }
753
754 if (!ret) {
755 hci_dev_hold(hdev);
756 set_bit(HCI_UP, &hdev->flags);
757 hci_notify(hdev, HCI_DEV_UP);
3f0f524b 758 hci_update_ad(hdev);
bb4b2a9a
AE
759 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
760 mgmt_valid_hdev(hdev)) {
09fd0de5 761 hci_dev_lock(hdev);
744cf19e 762 mgmt_powered(hdev, 1);
09fd0de5 763 hci_dev_unlock(hdev);
56e5cb86 764 }
8e87d142 765 } else {
1da177e4 766 /* Init failed, cleanup */
3eff45ea 767 flush_work(&hdev->tx_work);
c347b765 768 flush_work(&hdev->cmd_work);
b78752cc 769 flush_work(&hdev->rx_work);
1da177e4
LT
770
771 skb_queue_purge(&hdev->cmd_q);
772 skb_queue_purge(&hdev->rx_q);
773
774 if (hdev->flush)
775 hdev->flush(hdev);
776
777 if (hdev->sent_cmd) {
778 kfree_skb(hdev->sent_cmd);
779 hdev->sent_cmd = NULL;
780 }
781
782 hdev->close(hdev);
783 hdev->flags = 0;
784 }
785
786done:
787 hci_req_unlock(hdev);
788 hci_dev_put(hdev);
789 return ret;
790}
791
792static int hci_dev_do_close(struct hci_dev *hdev)
793{
794 BT_DBG("%s %p", hdev->name, hdev);
795
28b75a89
AG
796 cancel_work_sync(&hdev->le_scan);
797
78c04c0b
VCG
798 cancel_delayed_work(&hdev->power_off);
799
1da177e4
LT
800 hci_req_cancel(hdev, ENODEV);
801 hci_req_lock(hdev);
802
803 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 804 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
805 hci_req_unlock(hdev);
806 return 0;
807 }
808
3eff45ea
GP
809 /* Flush RX and TX works */
810 flush_work(&hdev->tx_work);
b78752cc 811 flush_work(&hdev->rx_work);
1da177e4 812
16ab91ab 813 if (hdev->discov_timeout > 0) {
e0f9309f 814 cancel_delayed_work(&hdev->discov_off);
16ab91ab 815 hdev->discov_timeout = 0;
5e5282bb 816 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
817 }
818
a8b2d5c2 819 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
820 cancel_delayed_work(&hdev->service_cache);
821
7ba8b4be
AG
822 cancel_delayed_work_sync(&hdev->le_scan_disable);
823
09fd0de5 824 hci_dev_lock(hdev);
1da177e4
LT
825 inquiry_cache_flush(hdev);
826 hci_conn_hash_flush(hdev);
09fd0de5 827 hci_dev_unlock(hdev);
1da177e4
LT
828
829 hci_notify(hdev, HCI_DEV_DOWN);
830
831 if (hdev->flush)
832 hdev->flush(hdev);
833
834 /* Reset device */
835 skb_queue_purge(&hdev->cmd_q);
836 atomic_set(&hdev->cmd_cnt, 1);
8af59467 837 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 838 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 839 set_bit(HCI_INIT, &hdev->flags);
01178cd4 840 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
841 clear_bit(HCI_INIT, &hdev->flags);
842 }
843
c347b765
GP
844 /* flush cmd work */
845 flush_work(&hdev->cmd_work);
1da177e4
LT
846
847 /* Drop queues */
848 skb_queue_purge(&hdev->rx_q);
849 skb_queue_purge(&hdev->cmd_q);
850 skb_queue_purge(&hdev->raw_q);
851
852 /* Drop last sent command */
853 if (hdev->sent_cmd) {
b79f44c1 854 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
855 kfree_skb(hdev->sent_cmd);
856 hdev->sent_cmd = NULL;
857 }
858
859 /* After this point our queues are empty
860 * and no tasks are scheduled. */
861 hdev->close(hdev);
862
bb4b2a9a
AE
863 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
864 mgmt_valid_hdev(hdev)) {
8ee56540
MH
865 hci_dev_lock(hdev);
866 mgmt_powered(hdev, 0);
867 hci_dev_unlock(hdev);
868 }
5add6af8 869
1da177e4
LT
870 /* Clear flags */
871 hdev->flags = 0;
872
ced5c338
AE
873 /* Controller radio is available but is currently powered down */
874 hdev->amp_status = 0;
875
e59fda8d 876 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 877 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 878
1da177e4
LT
879 hci_req_unlock(hdev);
880
881 hci_dev_put(hdev);
882 return 0;
883}
884
885int hci_dev_close(__u16 dev)
886{
887 struct hci_dev *hdev;
888 int err;
889
70f23020
AE
890 hdev = hci_dev_get(dev);
891 if (!hdev)
1da177e4 892 return -ENODEV;
8ee56540
MH
893
894 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
895 cancel_delayed_work(&hdev->power_off);
896
1da177e4 897 err = hci_dev_do_close(hdev);
8ee56540 898
1da177e4
LT
899 hci_dev_put(hdev);
900 return err;
901}
902
903int hci_dev_reset(__u16 dev)
904{
905 struct hci_dev *hdev;
906 int ret = 0;
907
70f23020
AE
908 hdev = hci_dev_get(dev);
909 if (!hdev)
1da177e4
LT
910 return -ENODEV;
911
912 hci_req_lock(hdev);
1da177e4
LT
913
914 if (!test_bit(HCI_UP, &hdev->flags))
915 goto done;
916
917 /* Drop queues */
918 skb_queue_purge(&hdev->rx_q);
919 skb_queue_purge(&hdev->cmd_q);
920
09fd0de5 921 hci_dev_lock(hdev);
1da177e4
LT
922 inquiry_cache_flush(hdev);
923 hci_conn_hash_flush(hdev);
09fd0de5 924 hci_dev_unlock(hdev);
1da177e4
LT
925
926 if (hdev->flush)
927 hdev->flush(hdev);
928
8e87d142 929 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 930 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
931
932 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 933 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
934
935done:
1da177e4
LT
936 hci_req_unlock(hdev);
937 hci_dev_put(hdev);
938 return ret;
939}
940
941int hci_dev_reset_stat(__u16 dev)
942{
943 struct hci_dev *hdev;
944 int ret = 0;
945
70f23020
AE
946 hdev = hci_dev_get(dev);
947 if (!hdev)
1da177e4
LT
948 return -ENODEV;
949
950 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
951
952 hci_dev_put(hdev);
953
954 return ret;
955}
956
957int hci_dev_cmd(unsigned int cmd, void __user *arg)
958{
959 struct hci_dev *hdev;
960 struct hci_dev_req dr;
961 int err = 0;
962
963 if (copy_from_user(&dr, arg, sizeof(dr)))
964 return -EFAULT;
965
70f23020
AE
966 hdev = hci_dev_get(dr.dev_id);
967 if (!hdev)
1da177e4
LT
968 return -ENODEV;
969
970 switch (cmd) {
971 case HCISETAUTH:
01178cd4
JH
972 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
973 HCI_INIT_TIMEOUT);
1da177e4
LT
974 break;
975
976 case HCISETENCRYPT:
977 if (!lmp_encrypt_capable(hdev)) {
978 err = -EOPNOTSUPP;
979 break;
980 }
981
982 if (!test_bit(HCI_AUTH, &hdev->flags)) {
983 /* Auth must be enabled first */
01178cd4
JH
984 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
985 HCI_INIT_TIMEOUT);
1da177e4
LT
986 if (err)
987 break;
988 }
989
01178cd4
JH
990 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
991 HCI_INIT_TIMEOUT);
1da177e4
LT
992 break;
993
994 case HCISETSCAN:
01178cd4
JH
995 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
996 HCI_INIT_TIMEOUT);
1da177e4
LT
997 break;
998
1da177e4 999 case HCISETLINKPOL:
01178cd4
JH
1000 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1001 HCI_INIT_TIMEOUT);
1da177e4
LT
1002 break;
1003
1004 case HCISETLINKMODE:
e4e8e37c
MH
1005 hdev->link_mode = ((__u16) dr.dev_opt) &
1006 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1007 break;
1008
1009 case HCISETPTYPE:
1010 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1011 break;
1012
1013 case HCISETACLMTU:
e4e8e37c
MH
1014 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1015 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1016 break;
1017
1018 case HCISETSCOMTU:
e4e8e37c
MH
1019 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1020 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1021 break;
1022
1023 default:
1024 err = -EINVAL;
1025 break;
1026 }
e4e8e37c 1027
1da177e4
LT
1028 hci_dev_put(hdev);
1029 return err;
1030}
1031
1032int hci_get_dev_list(void __user *arg)
1033{
8035ded4 1034 struct hci_dev *hdev;
1da177e4
LT
1035 struct hci_dev_list_req *dl;
1036 struct hci_dev_req *dr;
1da177e4
LT
1037 int n = 0, size, err;
1038 __u16 dev_num;
1039
1040 if (get_user(dev_num, (__u16 __user *) arg))
1041 return -EFAULT;
1042
1043 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1044 return -EINVAL;
1045
1046 size = sizeof(*dl) + dev_num * sizeof(*dr);
1047
70f23020
AE
1048 dl = kzalloc(size, GFP_KERNEL);
1049 if (!dl)
1da177e4
LT
1050 return -ENOMEM;
1051
1052 dr = dl->dev_req;
1053
f20d09d5 1054 read_lock(&hci_dev_list_lock);
8035ded4 1055 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1056 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1057 cancel_delayed_work(&hdev->power_off);
c542a06c 1058
a8b2d5c2
JH
1059 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1060 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1061
1da177e4
LT
1062 (dr + n)->dev_id = hdev->id;
1063 (dr + n)->dev_opt = hdev->flags;
c542a06c 1064
1da177e4
LT
1065 if (++n >= dev_num)
1066 break;
1067 }
f20d09d5 1068 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1069
1070 dl->dev_num = n;
1071 size = sizeof(*dl) + n * sizeof(*dr);
1072
1073 err = copy_to_user(arg, dl, size);
1074 kfree(dl);
1075
1076 return err ? -EFAULT : 0;
1077}
1078
1079int hci_get_dev_info(void __user *arg)
1080{
1081 struct hci_dev *hdev;
1082 struct hci_dev_info di;
1083 int err = 0;
1084
1085 if (copy_from_user(&di, arg, sizeof(di)))
1086 return -EFAULT;
1087
70f23020
AE
1088 hdev = hci_dev_get(di.dev_id);
1089 if (!hdev)
1da177e4
LT
1090 return -ENODEV;
1091
a8b2d5c2 1092 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1093 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1094
a8b2d5c2
JH
1095 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1096 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1097
1da177e4
LT
1098 strcpy(di.name, hdev->name);
1099 di.bdaddr = hdev->bdaddr;
943da25d 1100 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1101 di.flags = hdev->flags;
1102 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1103 if (lmp_bredr_capable(hdev)) {
1104 di.acl_mtu = hdev->acl_mtu;
1105 di.acl_pkts = hdev->acl_pkts;
1106 di.sco_mtu = hdev->sco_mtu;
1107 di.sco_pkts = hdev->sco_pkts;
1108 } else {
1109 di.acl_mtu = hdev->le_mtu;
1110 di.acl_pkts = hdev->le_pkts;
1111 di.sco_mtu = 0;
1112 di.sco_pkts = 0;
1113 }
1da177e4
LT
1114 di.link_policy = hdev->link_policy;
1115 di.link_mode = hdev->link_mode;
1116
1117 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1118 memcpy(&di.features, &hdev->features, sizeof(di.features));
1119
1120 if (copy_to_user(arg, &di, sizeof(di)))
1121 err = -EFAULT;
1122
1123 hci_dev_put(hdev);
1124
1125 return err;
1126}
1127
1128/* ---- Interface to HCI drivers ---- */
1129
611b30f7
MH
1130static int hci_rfkill_set_block(void *data, bool blocked)
1131{
1132 struct hci_dev *hdev = data;
1133
1134 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1135
1136 if (!blocked)
1137 return 0;
1138
1139 hci_dev_do_close(hdev);
1140
1141 return 0;
1142}
1143
1144static const struct rfkill_ops hci_rfkill_ops = {
1145 .set_block = hci_rfkill_set_block,
1146};
1147
ab81cbf9
JH
1148static void hci_power_on(struct work_struct *work)
1149{
1150 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1151
1152 BT_DBG("%s", hdev->name);
1153
1154 if (hci_dev_open(hdev->id) < 0)
1155 return;
1156
a8b2d5c2 1157 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1158 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1159 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1160
a8b2d5c2 1161 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1162 mgmt_index_added(hdev);
ab81cbf9
JH
1163}
1164
/* Deferred power-off handler (hdev->power_off delayed work). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1174
16ab91ab
JH
/* Discoverable-timeout handler: drop back to page-scan only (inquiry
 * scan off) when the mgmt discoverable period expires.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1192
2aeb9a1a
JH
1193int hci_uuids_clear(struct hci_dev *hdev)
1194{
4821002c 1195 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1196
4821002c
JH
1197 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1198 list_del(&uuid->list);
2aeb9a1a
JH
1199 kfree(uuid);
1200 }
1201
1202 return 0;
1203}
1204
55ed8ca1
JH
1205int hci_link_keys_clear(struct hci_dev *hdev)
1206{
1207 struct list_head *p, *n;
1208
1209 list_for_each_safe(p, n, &hdev->link_keys) {
1210 struct link_key *key;
1211
1212 key = list_entry(p, struct link_key, list);
1213
1214 list_del(p);
1215 kfree(key);
1216 }
1217
1218 return 0;
1219}
1220
b899efaf
VCG
1221int hci_smp_ltks_clear(struct hci_dev *hdev)
1222{
1223 struct smp_ltk *k, *tmp;
1224
1225 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1226 list_del(&k->list);
1227 kfree(k);
1228 }
1229
1230 return 0;
1231}
1232
55ed8ca1
JH
1233struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234{
8035ded4 1235 struct link_key *k;
55ed8ca1 1236
8035ded4 1237 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1238 if (bacmp(bdaddr, &k->bdaddr) == 0)
1239 return k;
55ed8ca1
JH
1240
1241 return NULL;
1242}
1243
/* Decide whether a freshly generated BR/EDR link key should be stored
 * persistently, based on the key type and the authentication
 * requirements of both sides of @conn.  Check order is significant.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1279
c9839a11 1280struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1281{
c9839a11 1282 struct smp_ltk *k;
75d262c2 1283
c9839a11
VCG
1284 list_for_each_entry(k, &hdev->long_term_keys, list) {
1285 if (k->ediv != ediv ||
a8c5fb1a 1286 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1287 continue;
1288
c9839a11 1289 return k;
75d262c2
VCG
1290 }
1291
1292 return NULL;
1293}
75d262c2 1294
c9839a11 1295struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1296 u8 addr_type)
75d262c2 1297{
c9839a11 1298 struct smp_ltk *k;
75d262c2 1299
c9839a11
VCG
1300 list_for_each_entry(k, &hdev->long_term_keys, list)
1301 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1302 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1303 return k;
1304
1305 return NULL;
1306}
75d262c2 1307
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 * @new_key: non-zero when the controller just generated this key; in
 * that case mgmt is notified and non-persistent keys are flagged for
 * flushing on disconnect.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1360
/* Store (or update in place) an SMP key for @bdaddr/@addr_type.
 * Only STK and LTK key types are accepted; anything else is silently
 * ignored.  mgmt is notified only for new LTKs.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are transient, so only real LTKs are reported to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1397
55ed8ca1
JH
1398int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399{
1400 struct link_key *key;
1401
1402 key = hci_find_link_key(hdev, bdaddr);
1403 if (!key)
1404 return -ENOENT;
1405
6ed93dc6 1406 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1407
1408 list_del(&key->list);
1409 kfree(key);
1410
1411 return 0;
1412}
1413
b899efaf
VCG
1414int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1415{
1416 struct smp_ltk *k, *tmp;
1417
1418 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1419 if (bacmp(bdaddr, &k->bdaddr))
1420 continue;
1421
6ed93dc6 1422 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1423
1424 list_del(&k->list);
1425 kfree(k);
1426 }
1427
1428 return 0;
1429}
1430
/* HCI command timer function: fires when the controller failed to
 * answer an HCI command in time.  Logs the stuck opcode (if the sent
 * command skb is still around) and re-opens the command quota so the
 * cmd_work can make progress again.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one command to be sent again and kick the queue */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1448
2763eda6 1449struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1450 bdaddr_t *bdaddr)
2763eda6
SJ
1451{
1452 struct oob_data *data;
1453
1454 list_for_each_entry(data, &hdev->remote_oob_data, list)
1455 if (bacmp(bdaddr, &data->bdaddr) == 0)
1456 return data;
1457
1458 return NULL;
1459}
1460
1461int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1462{
1463 struct oob_data *data;
1464
1465 data = hci_find_remote_oob_data(hdev, bdaddr);
1466 if (!data)
1467 return -ENOENT;
1468
6ed93dc6 1469 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1470
1471 list_del(&data->list);
1472 kfree(data);
1473
1474 return 0;
1475}
1476
1477int hci_remote_oob_data_clear(struct hci_dev *hdev)
1478{
1479 struct oob_data *data, *n;
1480
1481 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1482 list_del(&data->list);
1483 kfree(data);
1484 }
1485
1486 return 0;
1487}
1488
/* Store (or refresh) the remote OOB hash/randomizer pair for @bdaddr.
 * Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* Every field is overwritten below, so kmalloc is enough */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1512
04124681 1513struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1514{
8035ded4 1515 struct bdaddr_list *b;
b2a66aad 1516
8035ded4 1517 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1518 if (bacmp(bdaddr, &b->bdaddr) == 0)
1519 return b;
b2a66aad
AJ
1520
1521 return NULL;
1522}
1523
1524int hci_blacklist_clear(struct hci_dev *hdev)
1525{
1526 struct list_head *p, *n;
1527
1528 list_for_each_safe(p, n, &hdev->blacklist) {
1529 struct bdaddr_list *b;
1530
1531 b = list_entry(p, struct bdaddr_list, list);
1532
1533 list_del(p);
1534 kfree(b);
1535 }
1536
1537 return 0;
1538}
1539
/* Add @bdaddr to the reject list and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on
 * allocation failure, otherwise the mgmt_device_blocked() result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1560
88c1fe4b 1561int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1562{
1563 struct bdaddr_list *entry;
b2a66aad 1564
1ec918ce 1565 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1566 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1567
1568 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1569 if (!entry)
5e762444 1570 return -ENOENT;
b2a66aad
AJ
1571
1572 list_del(&entry->list);
1573 kfree(entry);
1574
88c1fe4b 1575 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1576}
1577
7ba8b4be
AG
/* __hci_req_sync() request callback: program the LE scan parameters
 * passed via @opt (a struct le_scan_params pointer).
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1590
/* __hci_req_sync() request callback: enable LE scanning with duplicate
 * filtering turned on.  @opt is unused.
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1601
/* Synchronously program the scan parameters and start an LE scan, then
 * arm the delayed work that disables it after @timeout ms.  Both
 * requests are serialized under hci_req_lock().  Returns 0,
 * -EINPROGRESS if a scan is already running, or the request error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request HCI timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Stop the scan automatically once @timeout ms have elapsed */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1635
7dbfac1d
AG
/* Cancel an LE scan started via hci_le_scan().  The disable command is
 * only sent if the delayed disable work was still pending (otherwise it
 * has already fired and sent it).  Returns -EALREADY when no scan is
 * active.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1653
7ba8b4be
AG
/* Delayed-work handler that turns LE scanning off after the timeout
 * set up by hci_do_le_scan().  A zeroed command payload means
 * "scanning disabled".
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1666
28b75a89
AG
/* Work handler backing hci_le_scan(): performs the actual (blocking)
 * scan setup with the parameters stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1677
/* Kick off an asynchronous LE scan: store the parameters on @hdev and
 * queue le_scan_work on the system long workqueue.  Returns -ENOTSUPP
 * in LE peripheral mode and -EINPROGRESS if a scan work is already
 * running or queued.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* Scan setup blocks on HCI requests, so run it off the long wq */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1700
9be0dab7
DH
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Defaults; drivers and controller setup may override later */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; see hci_cmd_timeout() */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1756
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1766
1da177e4
LT
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failures are non-fatal; continue without a switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Power-on runs deferred; see hci_power_on() */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1855
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index last so it cannot be reused early */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1914
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1922
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1930
76bca880
MH
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* Drop frames unless the device is up or being initialized */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Actual processing happens in hci_rx_work */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1953
/* Incrementally reassemble one HCI packet from a driver byte stream.
 * @index selects the per-type reassembly slot in hdev->reassembly[].
 * Consumes up to @count bytes from @data; completed frames are handed
 * to hci_recv_frame().  Returns the number of unconsumed bytes, or a
 * negative error (-EILSEQ on bad type/index, -ENOMEM on allocation or
 * oversized-payload failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum possible
		 * size for this type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2062
ef222013
MH
/* Feed a driver fragment of a known packet @type into reassembly.
 * Loops until all @count bytes are consumed or an error occurs.
 * Returns the last hci_reassembly() result (>= 0 leftover bytes or a
 * negative error).
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Slot (type - 1) is the per-type reassembly buffer */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2082
99811510
SS
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type indicator is in-band as the first byte. */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into reassembly.  The packet type is taken
 * from the first byte of each new packet; continuation bytes reuse the
 * type stored in the in-progress skb.  Returns leftover byte count or
 * a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2117
1da177e4
LT
2118/* ---- Interface to upper protocols ---- */
2119
1da177e4
LT
2120int hci_register_cb(struct hci_cb *cb)
2121{
2122 BT_DBG("%p name %s", cb, cb->name);
2123
f20d09d5 2124 write_lock(&hci_cb_list_lock);
1da177e4 2125 list_add(&cb->list, &hci_cb_list);
f20d09d5 2126 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2127
2128 return 0;
2129}
2130EXPORT_SYMBOL(hci_register_cb);
2131
2132int hci_unregister_cb(struct hci_cb *cb)
2133{
2134 BT_DBG("%p name %s", cb, cb->name);
2135
f20d09d5 2136 write_lock(&hci_cb_list_lock);
1da177e4 2137 list_del(&cb->list);
f20d09d5 2138 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2139
2140 return 0;
2141}
2142EXPORT_SYMBOL(hci_unregister_cb);
2143
/* Hand one outgoing frame to the driver, after timestamping it and
 * mirroring it to the monitor channel and (in promiscuous mode) the
 * raw HCI sockets.  Returns the driver's send result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2171
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header followed by the parameter block */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Actual transmission happens in hci_cmd_work */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2207
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only return the payload when the opcode matches */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2225
/* Send ACL data */
/* Prepend an ACL header carrying @handle (with packet-boundary/broadcast
 * @flags packed into the handle field) and the current payload length. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2238
/* Add ACL headers to @skb (and any frag_list fragments) and append the
 * result to @queue.  BR/EDR uses the connection handle, AMP the channel
 * handle.  Fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on AMP where the first fragment
			 * used chan->handle -- confirm this is intended. */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2297
/* Queue an outgoing ACL frame on the channel's data queue and kick the
 * TX work to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2310
/* Send SCO data: build the SCO header in front of the payload, tag the
 * packet type, queue it on the connection's data queue and schedule the
 * TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Make room for the header, then copy it into place */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2332
2333/* ---- HCI TX task (outgoing data) ---- */
2334
/* HCI Connection scheduler */

/* Pick the connection of the given link type that has queued data and
 * the fewest packets in flight (least-sent wins), and compute its fair
 * share of the free controller buffers.
 *
 * Returns the chosen connection or NULL; *quote receives its packet
 * budget for this round (at least 1 when a connection was found, 0
 * otherwise). */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only schedule fully or partially established links */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the buffer pool that flow-controls this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE pool
			 * (le_mtu == 0) share the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2395
6039aa73 2396static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2397{
2398 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2399 struct hci_conn *c;
1da177e4 2400
bae1f5d9 2401 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2402
bf4c6325
GP
2403 rcu_read_lock();
2404
1da177e4 2405 /* Kill stalled connections */
bf4c6325 2406 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2407 if (c->type == type && c->sent) {
6ed93dc6
AE
2408 BT_ERR("%s killing stalled connection %pMR",
2409 hdev->name, &c->dst);
bed71748 2410 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2411 }
2412 }
bf4c6325
GP
2413
2414 rcu_read_unlock();
1da177e4
LT
2415}
2416
/* Channel-level scheduler: across all connections of the given link
 * type, find the channel whose head skb has the highest priority;
 * among equal-priority channels, the one on the least-sent connection
 * wins.  *quote receives the channel's fair share of the relevant
 * buffer pool (at least 1).  Returns NULL when nothing is queued. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the least-sent
			 * election at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool that flow-controls this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share, but always allow at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2498
/* After a scheduling round, reset the per-round sent counter of every
 * channel that transmitted, and promote the head skb of every starved
 * channel (sent nothing, still has data) to HCI_PRIO_MAX - 1 so it
 * wins the priority comparison in hci_chan_sent() next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent something this round: clear the
			 * counter and leave its priority untouched */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2548
b71d385a
AE
2549static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2550{
2551 /* Calculate count of blocks used by this packet */
2552 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2553}
2554
6039aa73 2555static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2556{
1da177e4
LT
2557 if (!test_bit(HCI_RAW, &hdev->flags)) {
2558 /* ACL tx timeout must be longer than maximum
2559 * link supervision timeout (40.9 seconds) */
63d2bc1b 2560 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2561 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2562 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2563 }
63d2bc1b 2564}
1da177e4 2565
/* Send queued ACL data using packet-based flow control: repeatedly ask
 * the channel scheduler for the best channel and drain up to its quota
 * of same-or-higher-priority packets while ACL buffers remain. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually take it off the queue */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2603
/* Send queued ACL data using block-based flow control, where the
 * controller buffer pool is accounted in data blocks rather than whole
 * packets (AMP controllers, or BR/EDR controllers in block mode). */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; otherwise plain ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): the skb has already been dequeued at
			 * this point, so this early return drops it without
			 * freeing or requeueing it — confirm intentional. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Budget is consumed in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
2657
6039aa73 2658static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2659{
2660 BT_DBG("%s", hdev->name);
2661
bd1eb66b
AE
2662 /* No ACL link over BR/EDR controller */
2663 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2664 return;
2665
2666 /* No AMP link over AMP controller */
2667 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
2668 return;
2669
2670 switch (hdev->flow_ctl_mode) {
2671 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2672 hci_sched_acl_pkt(hdev);
2673 break;
2674
2675 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2676 hci_sched_acl_blk(hdev);
2677 break;
2678 }
2679}
2680
1da177e4 2681/* Schedule SCO */
6039aa73 2682static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2683{
2684 struct hci_conn *conn;
2685 struct sk_buff *skb;
2686 int quote;
2687
2688 BT_DBG("%s", hdev->name);
2689
52087a79
LAD
2690 if (!hci_conn_num(hdev, SCO_LINK))
2691 return;
2692
1da177e4
LT
2693 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703}
2704
6039aa73 2705static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2706{
2707 struct hci_conn *conn;
2708 struct sk_buff *skb;
2709 int quote;
2710
2711 BT_DBG("%s", hdev->name);
2712
52087a79
LAD
2713 if (!hci_conn_num(hdev, ESCO_LINK))
2714 return;
2715
8fc9ced3
GP
2716 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2717 &quote))) {
b6a0dc82
MH
2718 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2719 BT_DBG("skb %p len %d", skb, skb->len);
2720 hci_send_frame(skb);
2721
2722 conn->sent++;
2723 if (conn->sent == ~0)
2724 conn->sent = 0;
2725 }
2726 }
2727}
2728
/* Send queued LE data.  The budget comes from the dedicated LE buffer
 * pool when the controller has one (le_pkts != 0), otherwise from the
 * shared ACL pool, and the remainder is written back afterwards. */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers when available, shared ACL otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it was taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2779
3eff45ea 2780static void hci_tx_work(struct work_struct *work)
1da177e4 2781{
3eff45ea 2782 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2783 struct sk_buff *skb;
2784
6ed58ec5 2785 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2786 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2787
2788 /* Schedule queues and send stuff to HCI driver */
2789
2790 hci_sched_acl(hdev);
2791
2792 hci_sched_sco(hdev);
2793
b6a0dc82
MH
2794 hci_sched_esco(hdev);
2795
6ed58ec5
VT
2796 hci_sched_le(hdev);
2797
1da177e4
LT
2798 /* Send next queued raw (unknown type) packet */
2799 while ((skb = skb_dequeue(&hdev->raw_q)))
2800 hci_send_frame(skb);
1da177e4
LT
2801}
2802
25985edc 2803/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2804
2805/* ACL data packet */
6039aa73 2806static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2807{
2808 struct hci_acl_hdr *hdr = (void *) skb->data;
2809 struct hci_conn *conn;
2810 __u16 handle, flags;
2811
2812 skb_pull(skb, HCI_ACL_HDR_SIZE);
2813
2814 handle = __le16_to_cpu(hdr->handle);
2815 flags = hci_flags(handle);
2816 handle = hci_handle(handle);
2817
f0e09510 2818 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 2819 handle, flags);
1da177e4
LT
2820
2821 hdev->stat.acl_rx++;
2822
2823 hci_dev_lock(hdev);
2824 conn = hci_conn_hash_lookup_handle(hdev, handle);
2825 hci_dev_unlock(hdev);
8e87d142 2826
1da177e4 2827 if (conn) {
65983fc7 2828 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2829
1da177e4 2830 /* Send to upper protocol */
686ebf28
UF
2831 l2cap_recv_acldata(conn, skb, flags);
2832 return;
1da177e4 2833 } else {
8e87d142 2834 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2835 hdev->name, handle);
1da177e4
LT
2836 }
2837
2838 kfree_skb(skb);
2839}
2840
2841/* SCO data packet */
6039aa73 2842static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2843{
2844 struct hci_sco_hdr *hdr = (void *) skb->data;
2845 struct hci_conn *conn;
2846 __u16 handle;
2847
2848 skb_pull(skb, HCI_SCO_HDR_SIZE);
2849
2850 handle = __le16_to_cpu(hdr->handle);
2851
f0e09510 2852 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2853
2854 hdev->stat.sco_rx++;
2855
2856 hci_dev_lock(hdev);
2857 conn = hci_conn_hash_lookup_handle(hdev, handle);
2858 hci_dev_unlock(hdev);
2859
2860 if (conn) {
1da177e4 2861 /* Send to upper protocol */
686ebf28
UF
2862 sco_recv_scodata(conn, skb);
2863 return;
1da177e4 2864 } else {
8e87d142 2865 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2866 hdev->name, handle);
1da177e4
LT
2867 }
2868
2869 kfree_skb(skb);
2870}
2871
/* RX work handler: drain hdev->rx_q and dispatch each frame.  Every
 * frame is first copied to the HCI monitor and, in promiscuous mode,
 * to raw HCI sockets, before normal per-type processing. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode userspace drives the device directly, so
		 * nothing is processed by the stack here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2926
/* Command work handler: send the next queued HCI command when the
 * controller has command credit.  A clone of the frame is kept in
 * hdev->sent_cmd so the matching command complete/status event can be
 * paired with it later. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously stored command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While resetting, stop the command timer instead of
			 * re-arming it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2958
2959int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2960{
2961 /* General inquiry access code (GIAC) */
2962 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2963 struct hci_cp_inquiry cp;
2964
2965 BT_DBG("%s", hdev->name);
2966
2967 if (test_bit(HCI_INQUIRY, &hdev->flags))
2968 return -EINPROGRESS;
2969
4663262c
JH
2970 inquiry_cache_flush(hdev);
2971
2519a1fc
AG
2972 memset(&cp, 0, sizeof(cp));
2973 memcpy(&cp.lap, lap, sizeof(cp.lap));
2974 cp.length = length;
2975
2976 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2977}
023d5049
AG
2978
2979int hci_cancel_inquiry(struct hci_dev *hdev)
2980{
2981 BT_DBG("%s", hdev->name);
2982
2983 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2984 return -EALREADY;
023d5049
AG
2985
2986 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2987}
31f7956c
AG
2988
2989u8 bdaddr_to_le(u8 bdaddr_type)
2990{
2991 switch (bdaddr_type) {
2992 case BDADDR_LE_PUBLIC:
2993 return ADDR_LE_DEV_PUBLIC;
2994
2995 default:
2996 /* Fallback to LE Random address type */
2997 return ADDR_LE_DEV_RANDOM;
2998 }
2999}