Bluetooth: Add initial support for LE-only controllers
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device event to the HCI socket layer so monitoring
 * sockets learn about device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
23bb5763 60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 61{
f0e09510 62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
23bb5763 63
a5040efa
JH
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
75fb0e32
JH
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 69 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
1036b890 79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
23bb5763 88 return;
75fb0e32 89 }
1da177e4
LT
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
a8c5fb1a
GP
110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
1da177e4
LT
113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
e175072f 134 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
3ff50b79 144 }
1da177e4 145
a5040efa 146 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
6039aa73
GP
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
1da177e4
LT
156{
157 int ret;
158
7c6a329e
MH
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
1da177e4
LT
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
f630cf0d 175 set_bit(HCI_RESET, &hdev->flags);
a9de9248 176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
177}
178
e61ef499 179static void bredr_init(struct hci_dev *hdev)
1da177e4 180{
2455a3ea
AE
181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182
1da177e4 183 /* Read Local Supported Features */
a9de9248 184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 185
1143e5a6 186 /* Read Local Version */
a9de9248 187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1da177e4
LT
188}
189
e61ef499
AE
190static void amp_init(struct hci_dev *hdev)
191{
2455a3ea
AE
192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193
e61ef499
AE
194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
196
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
199
200 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
202}
203
204static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
205{
206 struct sk_buff *skb;
207
208 BT_DBG("%s %ld", hdev->name, opt);
209
210 /* Driver initialization */
211
212 /* Special commands */
213 while ((skb = skb_dequeue(&hdev->driver_init))) {
214 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 skb->dev = (void *) hdev;
216
217 skb_queue_tail(&hdev->cmd_q, skb);
218 queue_work(hdev->workqueue, &hdev->cmd_work);
219 }
220 skb_queue_purge(&hdev->driver_init);
221
11778716
AE
222 /* Reset */
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 hci_reset_req(hdev, 0);
225
e61ef499
AE
226 switch (hdev->dev_type) {
227 case HCI_BREDR:
228 bredr_init(hdev);
229 break;
230
231 case HCI_AMP:
232 amp_init(hdev);
233 break;
234
235 default:
236 BT_ERR("Unknown device type %d", hdev->dev_type);
237 break;
238 }
e61ef499
AE
239}
240
1da177e4
LT
241static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242{
243 __u8 scan = opt;
244
245 BT_DBG("%s %x", hdev->name, scan);
246
247 /* Inquiry and Page scans */
a9de9248 248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
249}
250
251static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252{
253 __u8 auth = opt;
254
255 BT_DBG("%s %x", hdev->name, auth);
256
257 /* Authentication */
a9de9248 258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
259}
260
261static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262{
263 __u8 encrypt = opt;
264
265 BT_DBG("%s %x", hdev->name, encrypt);
266
e4e8e37c 267 /* Encryption */
a9de9248 268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
269}
270
e4e8e37c
MH
271static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272{
273 __le16 policy = cpu_to_le16(opt);
274
a418b893 275 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
276
277 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279}
280
8e87d142 281/* Get HCI device by index.
1da177e4
LT
282 * Device is held on return. */
283struct hci_dev *hci_dev_get(int index)
284{
8035ded4 285 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
8035ded4 293 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301}
1da177e4
LT
302
303/* ---- Inquiry support ---- */
ff9ef578 304
30dc78e1
JH
305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
6fbe195d 309 switch (discov->state) {
343f935b 310 case DISCOVERY_FINDING:
6fbe195d 311 case DISCOVERY_RESOLVING:
30dc78e1
JH
312 return true;
313
6fbe195d
AG
314 default:
315 return false;
316 }
30dc78e1
JH
317}
318
ff9ef578
JH
319void hci_discovery_set_state(struct hci_dev *hdev, int state)
320{
321 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322
323 if (hdev->discovery.state == state)
324 return;
325
326 switch (state) {
327 case DISCOVERY_STOPPED:
7b99b659
AG
328 if (hdev->discovery.state != DISCOVERY_STARTING)
329 mgmt_discovering(hdev, 0);
ff9ef578
JH
330 break;
331 case DISCOVERY_STARTING:
332 break;
343f935b 333 case DISCOVERY_FINDING:
ff9ef578
JH
334 mgmt_discovering(hdev, 1);
335 break;
30dc78e1
JH
336 case DISCOVERY_RESOLVING:
337 break;
ff9ef578
JH
338 case DISCOVERY_STOPPING:
339 break;
340 }
341
342 hdev->discovery.state = state;
343}
344
1da177e4
LT
345static void inquiry_cache_flush(struct hci_dev *hdev)
346{
30883512 347 struct discovery_state *cache = &hdev->discovery;
b57c1a56 348 struct inquiry_entry *p, *n;
1da177e4 349
561aafbc
JH
350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
b57c1a56 352 kfree(p);
1da177e4 353 }
561aafbc
JH
354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
357}
358
a8c5fb1a
GP
359struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
1da177e4 361{
30883512 362 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
363 struct inquiry_entry *e;
364
6ed93dc6 365 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 366
561aafbc
JH
367 list_for_each_entry(e, &cache->all, all) {
368 if (!bacmp(&e->data.bdaddr, bdaddr))
369 return e;
370 }
371
372 return NULL;
373}
374
375struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 376 bdaddr_t *bdaddr)
561aafbc 377{
30883512 378 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
379 struct inquiry_entry *e;
380
6ed93dc6 381 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
382
383 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 384 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
385 return e;
386 }
387
388 return NULL;
1da177e4
LT
389}
390
30dc78e1 391struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
392 bdaddr_t *bdaddr,
393 int state)
30dc78e1
JH
394{
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
6ed93dc6 398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408}
409
a3d4e20a 410void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 411 struct inquiry_entry *ie)
a3d4e20a
JH
412{
413 struct discovery_state *cache = &hdev->discovery;
414 struct list_head *pos = &cache->resolve;
415 struct inquiry_entry *p;
416
417 list_del(&ie->list);
418
419 list_for_each_entry(p, &cache->resolve, list) {
420 if (p->name_state != NAME_PENDING &&
a8c5fb1a 421 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
422 break;
423 pos = &p->list;
424 }
425
426 list_add(&ie->list, pos);
427}
428
3175405b 429bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 430 bool name_known, bool *ssp)
1da177e4 431{
30883512 432 struct discovery_state *cache = &hdev->discovery;
70f23020 433 struct inquiry_entry *ie;
1da177e4 434
6ed93dc6 435 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 436
388fc8fa
JH
437 if (ssp)
438 *ssp = data->ssp_mode;
439
70f23020 440 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 441 if (ie) {
388fc8fa
JH
442 if (ie->data.ssp_mode && ssp)
443 *ssp = true;
444
a3d4e20a 445 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 446 data->rssi != ie->data.rssi) {
a3d4e20a
JH
447 ie->data.rssi = data->rssi;
448 hci_inquiry_cache_update_resolve(hdev, ie);
449 }
450
561aafbc 451 goto update;
a3d4e20a 452 }
561aafbc
JH
453
454 /* Entry not in the cache. Add new one. */
455 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
456 if (!ie)
3175405b 457 return false;
561aafbc
JH
458
459 list_add(&ie->all, &cache->all);
460
461 if (name_known) {
462 ie->name_state = NAME_KNOWN;
463 } else {
464 ie->name_state = NAME_NOT_KNOWN;
465 list_add(&ie->list, &cache->unknown);
466 }
70f23020 467
561aafbc
JH
468update:
469 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 470 ie->name_state != NAME_PENDING) {
561aafbc
JH
471 ie->name_state = NAME_KNOWN;
472 list_del(&ie->list);
1da177e4
LT
473 }
474
70f23020
AE
475 memcpy(&ie->data, data, sizeof(*data));
476 ie->timestamp = jiffies;
1da177e4 477 cache->timestamp = jiffies;
3175405b
JH
478
479 if (ie->name_state == NAME_NOT_KNOWN)
480 return false;
481
482 return true;
1da177e4
LT
483}
484
485static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
486{
30883512 487 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
488 struct inquiry_info *info = (struct inquiry_info *) buf;
489 struct inquiry_entry *e;
490 int copied = 0;
491
561aafbc 492 list_for_each_entry(e, &cache->all, all) {
1da177e4 493 struct inquiry_data *data = &e->data;
b57c1a56
JH
494
495 if (copied >= num)
496 break;
497
1da177e4
LT
498 bacpy(&info->bdaddr, &data->bdaddr);
499 info->pscan_rep_mode = data->pscan_rep_mode;
500 info->pscan_period_mode = data->pscan_period_mode;
501 info->pscan_mode = data->pscan_mode;
502 memcpy(info->dev_class, data->dev_class, 3);
503 info->clock_offset = data->clock_offset;
b57c1a56 504
1da177e4 505 info++;
b57c1a56 506 copied++;
1da177e4
LT
507 }
508
509 BT_DBG("cache %p, copied %d", cache, copied);
510 return copied;
511}
512
513static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
514{
515 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
516 struct hci_cp_inquiry cp;
517
518 BT_DBG("%s", hdev->name);
519
520 if (test_bit(HCI_INQUIRY, &hdev->flags))
521 return;
522
523 /* Start Inquiry */
524 memcpy(&cp.lap, &ir->lap, 3);
525 cp.length = ir->length;
526 cp.num_rsp = ir->num_rsp;
a9de9248 527 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
528}
529
530int hci_inquiry(void __user *arg)
531{
532 __u8 __user *ptr = arg;
533 struct hci_inquiry_req ir;
534 struct hci_dev *hdev;
535 int err = 0, do_inquiry = 0, max_rsp;
536 long timeo;
537 __u8 *buf;
538
539 if (copy_from_user(&ir, ptr, sizeof(ir)))
540 return -EFAULT;
541
5a08ecce
AE
542 hdev = hci_dev_get(ir.dev_id);
543 if (!hdev)
1da177e4
LT
544 return -ENODEV;
545
09fd0de5 546 hci_dev_lock(hdev);
8e87d142 547 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 548 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
549 inquiry_cache_flush(hdev);
550 do_inquiry = 1;
551 }
09fd0de5 552 hci_dev_unlock(hdev);
1da177e4 553
04837f64 554 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
555
556 if (do_inquiry) {
557 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
558 if (err < 0)
559 goto done;
560 }
1da177e4 561
8fc9ced3
GP
562 /* for unlimited number of responses we will use buffer with
563 * 255 entries
564 */
1da177e4
LT
565 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
566
567 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
568 * copy it to the user space.
569 */
01df8c31 570 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 571 if (!buf) {
1da177e4
LT
572 err = -ENOMEM;
573 goto done;
574 }
575
09fd0de5 576 hci_dev_lock(hdev);
1da177e4 577 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 578 hci_dev_unlock(hdev);
1da177e4
LT
579
580 BT_DBG("num_rsp %d", ir.num_rsp);
581
582 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
583 ptr += sizeof(ir);
584 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 585 ir.num_rsp))
1da177e4 586 err = -EFAULT;
8e87d142 587 } else
1da177e4
LT
588 err = -EFAULT;
589
590 kfree(buf);
591
592done:
593 hci_dev_put(hdev);
594 return err;
595}
596
597/* ---- HCI ioctl helpers ---- */
598
599int hci_dev_open(__u16 dev)
600{
601 struct hci_dev *hdev;
602 int ret = 0;
603
5a08ecce
AE
604 hdev = hci_dev_get(dev);
605 if (!hdev)
1da177e4
LT
606 return -ENODEV;
607
608 BT_DBG("%s %p", hdev->name, hdev);
609
610 hci_req_lock(hdev);
611
94324962
JH
612 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
613 ret = -ENODEV;
614 goto done;
615 }
616
611b30f7
MH
617 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
618 ret = -ERFKILL;
619 goto done;
620 }
621
1da177e4
LT
622 if (test_bit(HCI_UP, &hdev->flags)) {
623 ret = -EALREADY;
624 goto done;
625 }
626
627 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
628 set_bit(HCI_RAW, &hdev->flags);
629
07e3b94a
AE
630 /* Treat all non BR/EDR controllers as raw devices if
631 enable_hs is not set */
632 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
633 set_bit(HCI_RAW, &hdev->flags);
634
1da177e4
LT
635 if (hdev->open(hdev)) {
636 ret = -EIO;
637 goto done;
638 }
639
640 if (!test_bit(HCI_RAW, &hdev->flags)) {
641 atomic_set(&hdev->cmd_cnt, 1);
642 set_bit(HCI_INIT, &hdev->flags);
a5040efa 643 hdev->init_last_cmd = 0;
1da177e4 644
5f246e89 645 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
646
647 clear_bit(HCI_INIT, &hdev->flags);
648 }
649
650 if (!ret) {
651 hci_dev_hold(hdev);
652 set_bit(HCI_UP, &hdev->flags);
653 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
654 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
655 mgmt_valid_hdev(hdev)) {
09fd0de5 656 hci_dev_lock(hdev);
744cf19e 657 mgmt_powered(hdev, 1);
09fd0de5 658 hci_dev_unlock(hdev);
56e5cb86 659 }
8e87d142 660 } else {
1da177e4 661 /* Init failed, cleanup */
3eff45ea 662 flush_work(&hdev->tx_work);
c347b765 663 flush_work(&hdev->cmd_work);
b78752cc 664 flush_work(&hdev->rx_work);
1da177e4
LT
665
666 skb_queue_purge(&hdev->cmd_q);
667 skb_queue_purge(&hdev->rx_q);
668
669 if (hdev->flush)
670 hdev->flush(hdev);
671
672 if (hdev->sent_cmd) {
673 kfree_skb(hdev->sent_cmd);
674 hdev->sent_cmd = NULL;
675 }
676
677 hdev->close(hdev);
678 hdev->flags = 0;
679 }
680
681done:
682 hci_req_unlock(hdev);
683 hci_dev_put(hdev);
684 return ret;
685}
686
687static int hci_dev_do_close(struct hci_dev *hdev)
688{
689 BT_DBG("%s %p", hdev->name, hdev);
690
28b75a89
AG
691 cancel_work_sync(&hdev->le_scan);
692
78c04c0b
VCG
693 cancel_delayed_work(&hdev->power_off);
694
1da177e4
LT
695 hci_req_cancel(hdev, ENODEV);
696 hci_req_lock(hdev);
697
698 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 699 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
700 hci_req_unlock(hdev);
701 return 0;
702 }
703
3eff45ea
GP
704 /* Flush RX and TX works */
705 flush_work(&hdev->tx_work);
b78752cc 706 flush_work(&hdev->rx_work);
1da177e4 707
16ab91ab 708 if (hdev->discov_timeout > 0) {
e0f9309f 709 cancel_delayed_work(&hdev->discov_off);
16ab91ab 710 hdev->discov_timeout = 0;
5e5282bb 711 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
712 }
713
a8b2d5c2 714 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
715 cancel_delayed_work(&hdev->service_cache);
716
7ba8b4be
AG
717 cancel_delayed_work_sync(&hdev->le_scan_disable);
718
09fd0de5 719 hci_dev_lock(hdev);
1da177e4
LT
720 inquiry_cache_flush(hdev);
721 hci_conn_hash_flush(hdev);
09fd0de5 722 hci_dev_unlock(hdev);
1da177e4
LT
723
724 hci_notify(hdev, HCI_DEV_DOWN);
725
726 if (hdev->flush)
727 hdev->flush(hdev);
728
729 /* Reset device */
730 skb_queue_purge(&hdev->cmd_q);
731 atomic_set(&hdev->cmd_cnt, 1);
8af59467 732 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 733 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 734 set_bit(HCI_INIT, &hdev->flags);
5f246e89 735 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
736 clear_bit(HCI_INIT, &hdev->flags);
737 }
738
c347b765
GP
739 /* flush cmd work */
740 flush_work(&hdev->cmd_work);
1da177e4
LT
741
742 /* Drop queues */
743 skb_queue_purge(&hdev->rx_q);
744 skb_queue_purge(&hdev->cmd_q);
745 skb_queue_purge(&hdev->raw_q);
746
747 /* Drop last sent command */
748 if (hdev->sent_cmd) {
b79f44c1 749 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
750 kfree_skb(hdev->sent_cmd);
751 hdev->sent_cmd = NULL;
752 }
753
754 /* After this point our queues are empty
755 * and no tasks are scheduled. */
756 hdev->close(hdev);
757
bb4b2a9a
AE
758 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
759 mgmt_valid_hdev(hdev)) {
8ee56540
MH
760 hci_dev_lock(hdev);
761 mgmt_powered(hdev, 0);
762 hci_dev_unlock(hdev);
763 }
5add6af8 764
1da177e4
LT
765 /* Clear flags */
766 hdev->flags = 0;
767
e59fda8d 768 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 769 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 770
1da177e4
LT
771 hci_req_unlock(hdev);
772
773 hci_dev_put(hdev);
774 return 0;
775}
776
777int hci_dev_close(__u16 dev)
778{
779 struct hci_dev *hdev;
780 int err;
781
70f23020
AE
782 hdev = hci_dev_get(dev);
783 if (!hdev)
1da177e4 784 return -ENODEV;
8ee56540
MH
785
786 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
787 cancel_delayed_work(&hdev->power_off);
788
1da177e4 789 err = hci_dev_do_close(hdev);
8ee56540 790
1da177e4
LT
791 hci_dev_put(hdev);
792 return err;
793}
794
795int hci_dev_reset(__u16 dev)
796{
797 struct hci_dev *hdev;
798 int ret = 0;
799
70f23020
AE
800 hdev = hci_dev_get(dev);
801 if (!hdev)
1da177e4
LT
802 return -ENODEV;
803
804 hci_req_lock(hdev);
1da177e4
LT
805
806 if (!test_bit(HCI_UP, &hdev->flags))
807 goto done;
808
809 /* Drop queues */
810 skb_queue_purge(&hdev->rx_q);
811 skb_queue_purge(&hdev->cmd_q);
812
09fd0de5 813 hci_dev_lock(hdev);
1da177e4
LT
814 inquiry_cache_flush(hdev);
815 hci_conn_hash_flush(hdev);
09fd0de5 816 hci_dev_unlock(hdev);
1da177e4
LT
817
818 if (hdev->flush)
819 hdev->flush(hdev);
820
8e87d142 821 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 822 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
823
824 if (!test_bit(HCI_RAW, &hdev->flags))
5f246e89 825 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
826
827done:
1da177e4
LT
828 hci_req_unlock(hdev);
829 hci_dev_put(hdev);
830 return ret;
831}
832
833int hci_dev_reset_stat(__u16 dev)
834{
835 struct hci_dev *hdev;
836 int ret = 0;
837
70f23020
AE
838 hdev = hci_dev_get(dev);
839 if (!hdev)
1da177e4
LT
840 return -ENODEV;
841
842 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
843
844 hci_dev_put(hdev);
845
846 return ret;
847}
848
849int hci_dev_cmd(unsigned int cmd, void __user *arg)
850{
851 struct hci_dev *hdev;
852 struct hci_dev_req dr;
853 int err = 0;
854
855 if (copy_from_user(&dr, arg, sizeof(dr)))
856 return -EFAULT;
857
70f23020
AE
858 hdev = hci_dev_get(dr.dev_id);
859 if (!hdev)
1da177e4
LT
860 return -ENODEV;
861
862 switch (cmd) {
863 case HCISETAUTH:
04837f64 864 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 865 HCI_INIT_TIMEOUT);
1da177e4
LT
866 break;
867
868 case HCISETENCRYPT:
869 if (!lmp_encrypt_capable(hdev)) {
870 err = -EOPNOTSUPP;
871 break;
872 }
873
874 if (!test_bit(HCI_AUTH, &hdev->flags)) {
875 /* Auth must be enabled first */
04837f64 876 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 877 HCI_INIT_TIMEOUT);
1da177e4
LT
878 if (err)
879 break;
880 }
881
04837f64 882 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
5f246e89 883 HCI_INIT_TIMEOUT);
1da177e4
LT
884 break;
885
886 case HCISETSCAN:
04837f64 887 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
5f246e89 888 HCI_INIT_TIMEOUT);
1da177e4
LT
889 break;
890
1da177e4 891 case HCISETLINKPOL:
e4e8e37c 892 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
5f246e89 893 HCI_INIT_TIMEOUT);
1da177e4
LT
894 break;
895
896 case HCISETLINKMODE:
e4e8e37c
MH
897 hdev->link_mode = ((__u16) dr.dev_opt) &
898 (HCI_LM_MASTER | HCI_LM_ACCEPT);
899 break;
900
901 case HCISETPTYPE:
902 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
903 break;
904
905 case HCISETACLMTU:
e4e8e37c
MH
906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
908 break;
909
910 case HCISETSCOMTU:
e4e8e37c
MH
911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
913 break;
914
915 default:
916 err = -EINVAL;
917 break;
918 }
e4e8e37c 919
1da177e4
LT
920 hci_dev_put(hdev);
921 return err;
922}
923
924int hci_get_dev_list(void __user *arg)
925{
8035ded4 926 struct hci_dev *hdev;
1da177e4
LT
927 struct hci_dev_list_req *dl;
928 struct hci_dev_req *dr;
1da177e4
LT
929 int n = 0, size, err;
930 __u16 dev_num;
931
932 if (get_user(dev_num, (__u16 __user *) arg))
933 return -EFAULT;
934
935 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
936 return -EINVAL;
937
938 size = sizeof(*dl) + dev_num * sizeof(*dr);
939
70f23020
AE
940 dl = kzalloc(size, GFP_KERNEL);
941 if (!dl)
1da177e4
LT
942 return -ENOMEM;
943
944 dr = dl->dev_req;
945
f20d09d5 946 read_lock(&hci_dev_list_lock);
8035ded4 947 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 948 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 949 cancel_delayed_work(&hdev->power_off);
c542a06c 950
a8b2d5c2
JH
951 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
952 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 953
1da177e4
LT
954 (dr + n)->dev_id = hdev->id;
955 (dr + n)->dev_opt = hdev->flags;
c542a06c 956
1da177e4
LT
957 if (++n >= dev_num)
958 break;
959 }
f20d09d5 960 read_unlock(&hci_dev_list_lock);
1da177e4
LT
961
962 dl->dev_num = n;
963 size = sizeof(*dl) + n * sizeof(*dr);
964
965 err = copy_to_user(arg, dl, size);
966 kfree(dl);
967
968 return err ? -EFAULT : 0;
969}
970
971int hci_get_dev_info(void __user *arg)
972{
973 struct hci_dev *hdev;
974 struct hci_dev_info di;
975 int err = 0;
976
977 if (copy_from_user(&di, arg, sizeof(di)))
978 return -EFAULT;
979
70f23020
AE
980 hdev = hci_dev_get(di.dev_id);
981 if (!hdev)
1da177e4
LT
982 return -ENODEV;
983
a8b2d5c2 984 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 985 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 986
a8b2d5c2
JH
987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
988 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 989
1da177e4
LT
990 strcpy(di.name, hdev->name);
991 di.bdaddr = hdev->bdaddr;
943da25d 992 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
993 di.flags = hdev->flags;
994 di.pkt_type = hdev->pkt_type;
995 di.acl_mtu = hdev->acl_mtu;
996 di.acl_pkts = hdev->acl_pkts;
997 di.sco_mtu = hdev->sco_mtu;
998 di.sco_pkts = hdev->sco_pkts;
999 di.link_policy = hdev->link_policy;
1000 di.link_mode = hdev->link_mode;
1001
1002 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1003 memcpy(&di.features, &hdev->features, sizeof(di.features));
1004
1005 if (copy_to_user(arg, &di, sizeof(di)))
1006 err = -EFAULT;
1007
1008 hci_dev_put(hdev);
1009
1010 return err;
1011}
1012
1013/* ---- Interface to HCI drivers ---- */
1014
611b30f7
MH
1015static int hci_rfkill_set_block(void *data, bool blocked)
1016{
1017 struct hci_dev *hdev = data;
1018
1019 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1020
1021 if (!blocked)
1022 return 0;
1023
1024 hci_dev_do_close(hdev);
1025
1026 return 0;
1027}
1028
1029static const struct rfkill_ops hci_rfkill_ops = {
1030 .set_block = hci_rfkill_set_block,
1031};
1032
ab81cbf9
JH
1033static void hci_power_on(struct work_struct *work)
1034{
1035 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1036
1037 BT_DBG("%s", hdev->name);
1038
1039 if (hci_dev_open(hdev->id) < 0)
1040 return;
1041
a8b2d5c2 1042 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
9345d40c 1043 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1044
a8b2d5c2 1045 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1046 mgmt_index_added(hdev);
ab81cbf9
JH
1047}
1048
1049static void hci_power_off(struct work_struct *work)
1050{
3243553f 1051 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1052 power_off.work);
ab81cbf9
JH
1053
1054 BT_DBG("%s", hdev->name);
1055
8ee56540 1056 hci_dev_do_close(hdev);
ab81cbf9
JH
1057}
1058
16ab91ab
JH
1059static void hci_discov_off(struct work_struct *work)
1060{
1061 struct hci_dev *hdev;
1062 u8 scan = SCAN_PAGE;
1063
1064 hdev = container_of(work, struct hci_dev, discov_off.work);
1065
1066 BT_DBG("%s", hdev->name);
1067
09fd0de5 1068 hci_dev_lock(hdev);
16ab91ab
JH
1069
1070 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1071
1072 hdev->discov_timeout = 0;
1073
09fd0de5 1074 hci_dev_unlock(hdev);
16ab91ab
JH
1075}
1076
2aeb9a1a
JH
1077int hci_uuids_clear(struct hci_dev *hdev)
1078{
1079 struct list_head *p, *n;
1080
1081 list_for_each_safe(p, n, &hdev->uuids) {
1082 struct bt_uuid *uuid;
1083
1084 uuid = list_entry(p, struct bt_uuid, list);
1085
1086 list_del(p);
1087 kfree(uuid);
1088 }
1089
1090 return 0;
1091}
1092
55ed8ca1
JH
1093int hci_link_keys_clear(struct hci_dev *hdev)
1094{
1095 struct list_head *p, *n;
1096
1097 list_for_each_safe(p, n, &hdev->link_keys) {
1098 struct link_key *key;
1099
1100 key = list_entry(p, struct link_key, list);
1101
1102 list_del(p);
1103 kfree(key);
1104 }
1105
1106 return 0;
1107}
1108
b899efaf
VCG
1109int hci_smp_ltks_clear(struct hci_dev *hdev)
1110{
1111 struct smp_ltk *k, *tmp;
1112
1113 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1114 list_del(&k->list);
1115 kfree(k);
1116 }
1117
1118 return 0;
1119}
1120
55ed8ca1
JH
1121struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1122{
8035ded4 1123 struct link_key *k;
55ed8ca1 1124
8035ded4 1125 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1126 if (bacmp(bdaddr, &k->bdaddr) == 0)
1127 return k;
55ed8ca1
JH
1128
1129 return NULL;
1130}
1131
/* Decide whether a newly created link key should be stored
 * persistently. The checks are ordered by precedence: legacy keys are
 * always kept, debug keys never, and otherwise the decision depends on
 * the authentication requirements of both sides of @conn.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1167
c9839a11 1168struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1169{
c9839a11 1170 struct smp_ltk *k;
75d262c2 1171
c9839a11
VCG
1172 list_for_each_entry(k, &hdev->long_term_keys, list) {
1173 if (k->ediv != ediv ||
a8c5fb1a 1174 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1175 continue;
1176
c9839a11 1177 return k;
75d262c2
VCG
1178 }
1179
1180 return NULL;
1181}
75d262c2 1182
c9839a11 1183struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1184 u8 addr_type)
75d262c2 1185{
c9839a11 1186 struct smp_ltk *k;
75d262c2 1187
c9839a11
VCG
1188 list_for_each_entry(k, &hdev->long_term_keys, list)
1189 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1190 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1191 return k;
1192
1193 return NULL;
1194}
75d262c2 1195
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @new_key: non-zero when the key came from a fresh pairing, in which
 *           case userspace (mgmt) is notified and the persistence
 *           policy is evaluated.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one;
	 * 0xff means "no previous key type known". */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection goes down */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1248
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type keys are stored; other types are silently
 * ignored (return 0). When @new_key is set and the key is an LTK,
 * userspace (mgmt) is notified. Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Replace an existing key for this address in place if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are transient, so only announce real LTKs to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1285
55ed8ca1
JH
1286int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1287{
1288 struct link_key *key;
1289
1290 key = hci_find_link_key(hdev, bdaddr);
1291 if (!key)
1292 return -ENOENT;
1293
6ed93dc6 1294 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1295
1296 list_del(&key->list);
1297 kfree(key);
1298
1299 return 0;
1300}
1301
b899efaf
VCG
1302int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1303{
1304 struct smp_ltk *k, *tmp;
1305
1306 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1307 if (bacmp(bdaddr, &k->bdaddr))
1308 continue;
1309
6ed93dc6 1310 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1311
1312 list_del(&k->list);
1313 kfree(k);
1314 }
1315
1316 return 0;
1317}
1318
/* HCI command timer function
 *
 * Fires when a sent HCI command received no completion in time: log
 * the stalled opcode (if one is pending), force the command credit
 * count back to 1 and kick the command work so the queue can drain.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1336
2763eda6 1337struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1338 bdaddr_t *bdaddr)
2763eda6
SJ
1339{
1340 struct oob_data *data;
1341
1342 list_for_each_entry(data, &hdev->remote_oob_data, list)
1343 if (bacmp(bdaddr, &data->bdaddr) == 0)
1344 return data;
1345
1346 return NULL;
1347}
1348
1349int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351 struct oob_data *data;
1352
1353 data = hci_find_remote_oob_data(hdev, bdaddr);
1354 if (!data)
1355 return -ENOENT;
1356
6ed93dc6 1357 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1358
1359 list_del(&data->list);
1360 kfree(data);
1361
1362 return 0;
1363}
1364
1365int hci_remote_oob_data_clear(struct hci_dev *hdev)
1366{
1367 struct oob_data *data, *n;
1368
1369 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1370 list_del(&data->list);
1371 kfree(data);
1372 }
1373
1374 return 0;
1375}
1376
/* Store (or refresh) remote OOB pairing data (hash C / randomizer R)
 * for @bdaddr. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine here: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1400
04124681 1401struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1402{
8035ded4 1403 struct bdaddr_list *b;
b2a66aad 1404
8035ded4 1405 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1406 if (bacmp(bdaddr, &b->bdaddr) == 0)
1407 return b;
b2a66aad
AJ
1408
1409 return NULL;
1410}
1411
1412int hci_blacklist_clear(struct hci_dev *hdev)
1413{
1414 struct list_head *p, *n;
1415
1416 list_for_each_safe(p, n, &hdev->blacklist) {
1417 struct bdaddr_list *b;
1418
1419 b = list_entry(p, struct bdaddr_list, list);
1420
1421 list_del(p);
1422 kfree(b);
1423 }
1424
1425 return 0;
1426}
1427
88c1fe4b 1428int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1429{
1430 struct bdaddr_list *entry;
b2a66aad
AJ
1431
1432 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1433 return -EBADF;
1434
5e762444
AJ
1435 if (hci_blacklist_lookup(hdev, bdaddr))
1436 return -EEXIST;
b2a66aad
AJ
1437
1438 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1439 if (!entry)
1440 return -ENOMEM;
b2a66aad
AJ
1441
1442 bacpy(&entry->bdaddr, bdaddr);
1443
1444 list_add(&entry->list, &hdev->blacklist);
1445
88c1fe4b 1446 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1447}
1448
88c1fe4b 1449int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1450{
1451 struct bdaddr_list *entry;
b2a66aad 1452
1ec918ce 1453 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1454 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1455
1456 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1457 if (!entry)
5e762444 1458 return -ENOENT;
b2a66aad
AJ
1459
1460 list_del(&entry->list);
1461 kfree(entry);
1462
88c1fe4b 1463 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1464}
1465
7ba8b4be
AG
/* __hci_request callback: send LE Set Scan Parameters built from the
 * le_scan_params passed via @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1478
/* __hci_request callback: enable LE scanning with duplicate
 * filtering turned on (filter_dup = 1).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1489
/* Synchronously start an LE scan: set scan parameters, enable
 * scanning, then arm the delayed work that will disable the scan
 * after @timeout ms. Returns -EINPROGRESS if a scan is already
 * running, or the __hci_request() error. Sleeps; must not be called
 * from atomic context (uses hci_req_lock and a 3 s request timeout).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1523
7dbfac1d
AG
/* Cancel a running LE scan: unschedule the pending disable work and
 * send the disable command ourselves. Returns -EALREADY when no scan
 * is active.
 *
 * NOTE(review): if the delayed work has already started executing,
 * cancel_delayed_work() returns false and no disable command is sent
 * from here — presumably the running le_scan_disable_work() sends it
 * instead; confirm there is no window where neither path fires.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1541
7ba8b4be
AG
/* Delayed work that stops an LE scan when its timeout expires: an
 * all-zero hci_cp_le_set_scan_enable (enable = 0) disables scanning.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1554
28b75a89
AG
/* Work item that runs the (sleeping) LE scan start on behalf of
 * hci_le_scan(), using the parameters stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1565
/* Asynchronously start an LE scan: store the parameters on the device
 * and queue le_scan_work on the long-running system workqueue.
 * Returns -EINPROGRESS if the scan work is already queued or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1585
9be0dab7
DH
/* Alloc HCI device
 *
 * Allocate and initialize a zeroed struct hci_dev: default packet
 * types, locks, the various key/blacklist/OOB lists, work items,
 * skb queues and the command timeout timer. Returns the new device
 * or NULL on allocation failure. Counterpart of hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff intervals in 0.625 ms baseband slots */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1639
/* Free HCI device
 *
 * Purge any driver-init skbs and drop the device reference; the
 * actual kfree happens in the sysfs device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1649
1da177e4
LT
/* Register HCI device
 *
 * Allocate an index (AMP controllers never get index 0 so the index
 * can double as the AMP controller ID), add the device to the global
 * list, create its workqueue, sysfs entries and optional rfkill
 * switch, then schedule the initial power-on. Returns the new index
 * or a negative errno; on failure all partial setup is unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue: HCI processing must be
	 * ordered and may be needed for memory reclaim */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1728
/* Unregister HCI device
 *
 * Reverse of hci_register_dev(): mark the device as going away, drop
 * it from the global list, close it, free reassembly buffers, notify
 * mgmt and rfkill, tear down sysfs and the workqueue, clear all
 * stored keys/blacklist/OOB data, and finally release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stops new work from being scheduled against this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1784
/* Suspend HCI device: only notifies registered listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1792
/* Resume HCI device: only notifies registered listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1800
76bca880
MH
/* Receive frame from HCI drivers
 *
 * Entry point for complete inbound frames from a driver. The frame is
 * dropped with -ENXIO unless the device is up or still initializing;
 * otherwise it is timestamped, marked incoming and queued for the RX
 * work to process.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1823
/* Reassemble a fragmented HCI packet of @type in slot @index of
 * hdev->reassembly[]. Consumes up to @count bytes from @data.
 *
 * Returns the number of unconsumed bytes (>= 0): 0 means everything
 * was absorbed into a still-incomplete frame, > 0 means a complete
 * frame was handed to hci_recv_frame() and the remainder belongs to
 * the next packet. Returns -EILSEQ for a bad type/index and -ENOMEM
 * on allocation failure or when the declared payload exceeds the
 * buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Fresh frame: allocate the worst-case buffer for this
		 * packet type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the
		 * current header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and sanity-check it against the tailroom */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1932
ef222013
MH
/* Feed @count bytes of a known-type packet stream into the
 * per-type reassembly slot (slot index = type - 1). Loops until
 * hci_reassembly() has absorbed everything or reports an error.
 * Returns 0/positive leftover count or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Skip past what was consumed and retry the remainder */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1952
99811510
SS
/* Dedicated reassembly slot for byte-stream (e.g. UART H4) input */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into reassembly. When no frame is in
 * progress, the first byte of the stream is the HCI packet-type
 * indicator; otherwise the in-progress skb remembers the type.
 * Returns 0/positive leftover count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1987
1da177e4
LT
1988/* ---- Interface to upper protocols ---- */
1989
1da177e4
LT
/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2001
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2013
/* Hand one outbound frame to the driver: timestamp it, mirror it to
 * the monitor socket (and to plain sockets when in promiscuous mode),
 * drop the skb owner and call the driver's send hook. Frees the skb
 * and returns -ENODEV when no device is attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2041
/* Send HCI command
 *
 * Build an HCI command packet (header + @plen bytes of @param) and
 * queue it on the command queue; the command work sends it when a
 * controller command credit is available. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2077
/* Get data from the previously sent command
 *
 * Returns a pointer to the parameter bytes of the last sent command
 * if its opcode matches @opcode, otherwise NULL. The returned pointer
 * aliases hdev->sent_cmd and is only valid until the next command is
 * sent.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2095
/* Send ACL data */

/* Prepend an ACL data header to @skb: the 16-bit handle field packs
 * the connection handle with the packet-boundary/broadcast @flags,
 * and dlen is the payload length before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2108
/* Add ACL headers to @skb (and its frag_list fragments) and append
 * everything to @queue. The first fragment keeps the caller's @flags;
 * the rest are rewritten to ACL_CONT. Fragments are queued atomically
 * under the queue lock so the scheduler never sees a partial frame.
 *
 * NOTE(review): the first fragment uses chan->handle for AMP devices
 * but the continuation fragments below always use conn->handle —
 * looks inconsistent for the HCI_AMP case; confirm intended behavior.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2167
/* Queue outbound ACL data on the channel's data queue and kick the
 * TX work, which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2180
/* Send SCO data
 *
 * Prepend a SCO header (connection handle + payload length) to @skb,
 * queue it on the connection's data queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2202
2203/* ---- HCI TX task (outgoing data) ---- */
2204
/* HCI Connection scheduler */

/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling), and compute its send quota in
 * *@quote from the available controller buffer credits for that link
 * type. Returns the chosen connection or NULL (quota 0) when nothing
 * is sendable.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the credits evenly; always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2265
/* Link-level TX timeout: the controller stopped returning buffer
 * credits for @type links, so disconnect every connection of that
 * type that still has unacknowledged packets.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2286
6039aa73
GP
/* Channel-level scheduler: among all channels of connections of
 * @type, select one whose head-of-queue skb has the highest priority,
 * breaking ties by fewest in-flight packets on the owning connection.
 * Computes the per-channel send quota in *@quote from the buffer
 * credits of the link type. Returns the chosen channel or NULL.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split credits across same-priority channels; minimum of one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2368
02b20f0b
LAD
2369static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2370{
2371 struct hci_conn_hash *h = &hdev->conn_hash;
2372 struct hci_conn *conn;
2373 int num = 0;
2374
2375 BT_DBG("%s", hdev->name);
2376
bf4c6325
GP
2377 rcu_read_lock();
2378
2379 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2380 struct hci_chan *chan;
2381
2382 if (conn->type != type)
2383 continue;
2384
2385 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2386 continue;
2387
2388 num++;
2389
8192edef 2390 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2391 struct sk_buff *skb;
2392
2393 if (chan->sent) {
2394 chan->sent = 0;
2395 continue;
2396 }
2397
2398 if (skb_queue_empty(&chan->data_q))
2399 continue;
2400
2401 skb = skb_peek(&chan->data_q);
2402 if (skb->priority >= HCI_PRIO_MAX - 1)
2403 continue;
2404
2405 skb->priority = HCI_PRIO_MAX - 1;
2406
2407 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2408 skb->priority);
02b20f0b
LAD
2409 }
2410
2411 if (hci_conn_num(hdev, type) == num)
2412 break;
2413 }
bf4c6325
GP
2414
2415 rcu_read_unlock();
2416
02b20f0b
LAD
2417}
2418
b71d385a
AE
2419static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2420{
2421 /* Calculate count of blocks used by this packet */
2422 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2423}
2424
6039aa73 2425static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2426{
1da177e4
LT
2427 if (!test_bit(HCI_RAW, &hdev->flags)) {
2428 /* ACL tx timeout must be longer than maximum
2429 * link supervision timeout (40.9 seconds) */
63d2bc1b 2430 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2431 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2432 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2433 }
63d2bc1b 2434}
1da177e4 2435
/* Transmit queued ACL data with packet-based flow control: service
 * channels in priority order until the controller's ACL buffer credits
 * (acl_cnt) run out or no channel has pending data.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before attempting to send */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet when selected;
		 * only packets at this priority are sent this round. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One buffer credit per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Rebalance channel priorities if anything was transmitted */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2473
/* Transmit queued ACL data with block-based flow control: each packet
 * consumes __get_blocks() controller data blocks from block_cnt.  On
 * an AMP controller the data links are AMP_LINK rather than ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry data over AMP links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		/* Only packets at the head priority are sent this round */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet needs more blocks than remain: give up
			 * for this scheduling round. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Credits and quota are charged in blocks */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
2527
6039aa73 2528static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2529{
2530 BT_DBG("%s", hdev->name);
2531
bd1eb66b
AE
2532 /* No ACL link over BR/EDR controller */
2533 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2534 return;
2535
2536 /* No AMP link over AMP controller */
2537 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
2538 return;
2539
2540 switch (hdev->flow_ctl_mode) {
2541 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2542 hci_sched_acl_pkt(hdev);
2543 break;
2544
2545 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2546 hci_sched_acl_blk(hdev);
2547 break;
2548 }
2549}
2550
1da177e4 2551/* Schedule SCO */
6039aa73 2552static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2553{
2554 struct hci_conn *conn;
2555 struct sk_buff *skb;
2556 int quote;
2557
2558 BT_DBG("%s", hdev->name);
2559
52087a79
LAD
2560 if (!hci_conn_num(hdev, SCO_LINK))
2561 return;
2562
1da177e4
LT
2563 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2564 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2565 BT_DBG("skb %p len %d", skb, skb->len);
2566 hci_send_frame(skb);
2567
2568 conn->sent++;
2569 if (conn->sent == ~0)
2570 conn->sent = 0;
2571 }
2572 }
2573}
2574
6039aa73 2575static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2576{
2577 struct hci_conn *conn;
2578 struct sk_buff *skb;
2579 int quote;
2580
2581 BT_DBG("%s", hdev->name);
2582
52087a79
LAD
2583 if (!hci_conn_num(hdev, ESCO_LINK))
2584 return;
2585
8fc9ced3
GP
2586 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2587 &quote))) {
b6a0dc82
MH
2588 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2589 BT_DBG("skb %p len %d", skb, skb->len);
2590 hci_send_frame(skb);
2591
2592 conn->sent++;
2593 if (conn->sent == ~0)
2594 conn->sent = 0;
2595 }
2596 }
2597}
2598
/* Transmit queued LE data.  Credits come from the dedicated LE buffer
 * pool when the controller has one (le_pkts != 0), otherwise from the
 * shared ACL pool; the remaining count is written back to whichever
 * pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use dedicated LE credits if present, else share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only packets at the head priority are sent this round */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Rebalance channel priorities if anything was transmitted */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2649
3eff45ea 2650static void hci_tx_work(struct work_struct *work)
1da177e4 2651{
3eff45ea 2652 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2653 struct sk_buff *skb;
2654
6ed58ec5 2655 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2656 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2657
2658 /* Schedule queues and send stuff to HCI driver */
2659
2660 hci_sched_acl(hdev);
2661
2662 hci_sched_sco(hdev);
2663
b6a0dc82
MH
2664 hci_sched_esco(hdev);
2665
6ed58ec5
VT
2666 hci_sched_le(hdev);
2667
1da177e4
LT
2668 /* Send next queued raw (unknown type) packet */
2669 while ((skb = skb_dequeue(&hdev->raw_q)))
2670 hci_send_frame(skb);
1da177e4
LT
2671}
2672
25985edc 2673/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2674
2675/* ACL data packet */
/* Process an incoming ACL data packet: decode the handle/flags word,
 * look up the owning connection and hand the payload to L2CAP.  The
 * skb is consumed either way (freed here when no connection matches).
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The handle word packs the connection handle together with
	 * the packet boundary/broadcast flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify mgmt of the connection the first time data is
		 * seen on it, when the mgmt interface is active. */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2718
2719/* SCO data packet */
6039aa73 2720static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2721{
2722 struct hci_sco_hdr *hdr = (void *) skb->data;
2723 struct hci_conn *conn;
2724 __u16 handle;
2725
2726 skb_pull(skb, HCI_SCO_HDR_SIZE);
2727
2728 handle = __le16_to_cpu(hdr->handle);
2729
f0e09510 2730 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2731
2732 hdev->stat.sco_rx++;
2733
2734 hci_dev_lock(hdev);
2735 conn = hci_conn_hash_lookup_handle(hdev, handle);
2736 hci_dev_unlock(hdev);
2737
2738 if (conn) {
1da177e4 2739 /* Send to upper protocol */
686ebf28
UF
2740 sco_recv_scodata(conn, skb);
2741 return;
1da177e4 2742 } else {
8e87d142 2743 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2744 hdev->name, handle);
1da177e4
LT
2745 }
2746
2747 kfree_skb(skb);
2748}
2749
/* RX work item: drain hdev->rx_q and dispatch each frame.  A copy of
 * every frame goes to the monitor socket and, in promiscuous mode, to
 * raw HCI sockets; the frame itself is then routed by packet type to
 * the event/ACL/SCO handlers, which consume the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode user space owns the device; the kernel
		 * stack does not process frames at all. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2804
/* Command work item: when the controller has command credit
 * (cmd_cnt), dequeue the next HCI command, keep a clone in sent_cmd
 * for matching its completion event, transmit it and (re)arm the
 * command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry
			 * on the next scheduling of this work. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2836
2837int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2838{
2839 /* General inquiry access code (GIAC) */
2840 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2841 struct hci_cp_inquiry cp;
2842
2843 BT_DBG("%s", hdev->name);
2844
2845 if (test_bit(HCI_INQUIRY, &hdev->flags))
2846 return -EINPROGRESS;
2847
4663262c
JH
2848 inquiry_cache_flush(hdev);
2849
2519a1fc
AG
2850 memset(&cp, 0, sizeof(cp));
2851 memcpy(&cp.lap, lap, sizeof(cp.lap));
2852 cp.length = length;
2853
2854 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2855}
023d5049
AG
2856
2857int hci_cancel_inquiry(struct hci_dev *hdev)
2858{
2859 BT_DBG("%s", hdev->name);
2860
2861 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2862 return -EALREADY;
023d5049
AG
2863
2864 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2865}
31f7956c
AG
2866
2867u8 bdaddr_to_le(u8 bdaddr_type)
2868{
2869 switch (bdaddr_type) {
2870 case BDADDR_LE_PUBLIC:
2871 return ADDR_LE_DEV_PUBLIC;
2872
2873 default:
2874 /* Fallback to LE Random address type */
2875 return ADDR_LE_DEV_RANDOM;
2876 }
2877}