ath9k_hw: Program filter coefficients correctly
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Broadcast a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called from the event path when a command completes. Wakes up any
 * synchronous request waiting in __hci_request(). During the HCI_INIT
 * phase, completions that do not match the last init command are either
 * ignored or trigger a resend (CSR quirk below).
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only act on a spurious reset-complete; anything else that
		 * mismatches the last init command is simply ignored. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Requeue a clone of the last sent command at the head of
		 * the command queue so it is retransmitted first. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Hand the result to the synchronous waiter, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
a8c5fb1a
GP
/* Run @req and sleep (interruptibly) until hci_req_complete() /
 * hci_req_cancel() signals completion or @timeout jiffies elapse.
 * Caller must hold the request lock. Returns 0 on success, a negative
 * errno on failure, -EINTR if interrupted by a signal, -ETIMEDOUT on
 * timeout.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	/* Mark the request pending BEFORE sending, so a fast completion
	 * from the event path is not lost. */
	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
6039aa73
GP
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
1da177e4
LT
156{
157 int ret;
158
7c6a329e
MH
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
1da177e4
LT
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
/* Request callback: issue an HCI_Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
/* Initial command sequence for BR/EDR controllers. */
static void bredr_init(struct hci_dev *hdev)
{
	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
189
e61ef499
AE
/* Initial command sequence for AMP controllers. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
203
/* Request callback run during HCI_INIT: flush any driver-provided
 * init commands, optionally reset, then run the transport-specific
 * init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open are sent first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
240
1da177e4
LT
/* Request callback: set inquiry/page scan enable; @opt carries the
 * scan-enable bitmask. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
250
/* Request callback: enable/disable authentication; @opt is the mode. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
260
/* Request callback: enable/disable encryption; @opt is the mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
270
e4e8e37c
MH
/* Request callback: set the default link policy; @opt is the policy
 * bitmask (converted to little endian on the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
280
8e87d142 281/* Get HCI device by index.
1da177e4
LT
282 * Device is held on return. */
283struct hci_dev *hci_dev_get(int index)
284{
8035ded4 285 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
8035ded4 293 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301}
1da177e4
LT
302
303/* ---- Inquiry support ---- */
ff9ef578 304
30dc78e1
JH
305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
6fbe195d 309 switch (discov->state) {
343f935b 310 case DISCOVERY_FINDING:
6fbe195d 311 case DISCOVERY_RESOLVING:
30dc78e1
JH
312 return true;
313
6fbe195d
AG
314 default:
315 return false;
316 }
30dc78e1
JH
317}
318
ff9ef578
JH
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the STOPPED/FINDING edges. No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never began, so no
		 * "discovering stopped" event is owed to userspace. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
344
1da177e4
LT
345static void inquiry_cache_flush(struct hci_dev *hdev)
346{
30883512 347 struct discovery_state *cache = &hdev->discovery;
b57c1a56 348 struct inquiry_entry *p, *n;
1da177e4 349
561aafbc
JH
350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
b57c1a56 352 kfree(p);
1da177e4 353 }
561aafbc
JH
354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
357}
358
a8c5fb1a
GP
359struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
1da177e4 361{
30883512 362 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
363 struct inquiry_entry *e;
364
6ed93dc6 365 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 366
561aafbc
JH
367 list_for_each_entry(e, &cache->all, all) {
368 if (!bacmp(&e->data.bdaddr, bdaddr))
369 return e;
370 }
371
372 return NULL;
373}
374
375struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 376 bdaddr_t *bdaddr)
561aafbc 377{
30883512 378 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
379 struct inquiry_entry *e;
380
6ed93dc6 381 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
382
383 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 384 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
385 return e;
386 }
387
388 return NULL;
1da177e4
LT
389}
390
30dc78e1 391struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
392 bdaddr_t *bdaddr,
393 int state)
30dc78e1
JH
394{
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
6ed93dc6 398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408}
409
/* Re-insert @ie into the resolve list so the list stays ordered by
 * decreasing |RSSI| (strongest signal resolved first), with entries
 * whose name resolution is already pending kept at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first entry that should come after @ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
428
/* Insert or refresh an inquiry cache entry for @data.
 *
 * @name_known: caller already knows the remote name.
 * @ssp: out parameter, set when the remote supports SSP (may be NULL).
 *
 * Returns true if the entry's name is known (or pending), false if a
 * name request is still needed or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support seen on any earlier response sticks. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for a name request: keep the
		 * resolve list ordered by signal strength. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever pending
	 * list (unknown/resolve) it was on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
484
485static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
486{
30883512 487 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
488 struct inquiry_info *info = (struct inquiry_info *) buf;
489 struct inquiry_entry *e;
490 int copied = 0;
491
561aafbc 492 list_for_each_entry(e, &cache->all, all) {
1da177e4 493 struct inquiry_data *data = &e->data;
b57c1a56
JH
494
495 if (copied >= num)
496 break;
497
1da177e4
LT
498 bacpy(&info->bdaddr, &data->bdaddr);
499 info->pscan_rep_mode = data->pscan_rep_mode;
500 info->pscan_period_mode = data->pscan_period_mode;
501 info->pscan_mode = data->pscan_mode;
502 memcpy(info->dev_class, data->dev_class, 3);
503 info->clock_offset = data->clock_offset;
b57c1a56 504
1da177e4 505 info++;
b57c1a56 506 copied++;
1da177e4
LT
507 }
508
509 BT_DBG("cache %p, copied %d", cache, copied);
510 return copied;
511}
512
/* Request callback: start an inquiry using the parameters in the
 * hci_inquiry_req passed via @opt. Skipped if one is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
529
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush was requested), then copy the cached
 * results back to userspace after the updated request struct.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units; 2000 ms is a safe upper bound. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request (with the real num_rsp) followed by the
	 * inquiry_info array. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
596
597/* ---- HCI ioctl helpers ---- */
598
/* Bring an HCI device up: open the transport, run the HCI init
 * sequence (unless the device is raw), and notify the stack/mgmt.
 * On init failure the device is fully torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init handshake entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
686
/* Tear an HCI device down: cancel deferred work, flush queues and
 * works in dependency order, optionally reset the controller, close
 * the transport and clear state. Safe to call when already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
776
777int hci_dev_close(__u16 dev)
778{
779 struct hci_dev *hdev;
780 int err;
781
70f23020
AE
782 hdev = hci_dev_get(dev);
783 if (!hdev)
1da177e4 784 return -ENODEV;
8ee56540
MH
785
786 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
787 cancel_delayed_work(&hdev->power_off);
788
1da177e4 789 err = hci_dev_do_close(hdev);
8ee56540 790
1da177e4
LT
791 hci_dev_put(hdev);
792 return err;
793}
794
/* HCIDEVRESET ioctl handler: drop queued traffic, flush caches and
 * connections, and issue an HCI reset (unless the device is raw).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Resetting a device that is down is a successful no-op. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control credits to their idle values. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
832
833int hci_dev_reset_stat(__u16 dev)
834{
835 struct hci_dev *hdev;
836 int ret = 0;
837
70f23020
AE
838 hdev = hci_dev_get(dev);
839 if (!hdev)
1da177e4
LT
840 return -ENODEV;
841
842 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
843
844 hci_dev_put(hdev);
845
846 return ret;
847}
848
/* Dispatcher for the HCISET* ioctls: each either runs a synchronous
 * HCI request or updates a field on the device directly.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count low. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
923
/* HCIGETDEVLIST ioctl handler: copy (up to dev_num) registered device
 * ids and flags to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; dev_num comes from userspace. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices is taken as "in use": abort auto-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
970
/* HCIGETDEVINFO ioctl handler: fill a hci_dev_info for one device. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device is taken as "in use": abort auto-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report LE buffer sizes in the ACL fields. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1019
1020/* ---- Interface to HCI drivers ---- */
1021
611b30f7
MH
/* rfkill callback: close the device when the radio is blocked.
 * Unblocking does not re-open; userspace must bring the device up again.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1039
ab81cbf9
JH
/* Work item: power the device on; arm the auto-off timer while in
 * AUTO_OFF, and announce the controller to mgmt once setup finishes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1055
/* Delayed work item: power the device off (auto-off / mgmt power off). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1065
16ab91ab
JH
/* Delayed work item: discoverable timeout expired — drop inquiry scan
 * (keep page scan) and clear the timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1083
2aeb9a1a
JH
1084int hci_uuids_clear(struct hci_dev *hdev)
1085{
1086 struct list_head *p, *n;
1087
1088 list_for_each_safe(p, n, &hdev->uuids) {
1089 struct bt_uuid *uuid;
1090
1091 uuid = list_entry(p, struct bt_uuid, list);
1092
1093 list_del(p);
1094 kfree(uuid);
1095 }
1096
1097 return 0;
1098}
1099
55ed8ca1
JH
1100int hci_link_keys_clear(struct hci_dev *hdev)
1101{
1102 struct list_head *p, *n;
1103
1104 list_for_each_safe(p, n, &hdev->link_keys) {
1105 struct link_key *key;
1106
1107 key = list_entry(p, struct link_key, list);
1108
1109 list_del(p);
1110 kfree(key);
1111 }
1112
1113 return 0;
1114}
1115
b899efaf
VCG
1116int hci_smp_ltks_clear(struct hci_dev *hdev)
1117{
1118 struct smp_ltk *k, *tmp;
1119
1120 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1121 list_del(&k->list);
1122 kfree(k);
1123 }
1124
1125 return 0;
1126}
1127
55ed8ca1
JH
1128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1129{
8035ded4 1130 struct link_key *k;
55ed8ca1 1131
8035ded4 1132 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1133 if (bacmp(bdaddr, &k->bdaddr) == 0)
1134 return k;
55ed8ca1
JH
1135
1136 return NULL;
1137}
1138
/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements of both sides.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1174
c9839a11 1175struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1176{
c9839a11 1177 struct smp_ltk *k;
75d262c2 1178
c9839a11
VCG
1179 list_for_each_entry(k, &hdev->long_term_keys, list) {
1180 if (k->ediv != ediv ||
a8c5fb1a 1181 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1182 continue;
1183
c9839a11 1184 return k;
75d262c2
VCG
1185 }
1186
1187 return NULL;
1188}
75d262c2 1189
c9839a11 1190struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1191 u8 addr_type)
75d262c2 1192{
c9839a11 1193 struct smp_ltk *k;
75d262c2 1194
c9839a11
VCG
1195 list_for_each_entry(k, &hdev->long_term_keys, list)
1196 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1197 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1198 return k;
1199
1200 return NULL;
1201}
75d262c2 1202
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * Reuses an existing entry when one is found, otherwise allocates a new
 * one (GFP_ATOMIC -- may run from non-sleeping context). When @new_key
 * is set, userspace is notified via mgmt_new_link_key() and the
 * connection's flush_key flag is derived from the persistence policy.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1255
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; other types are silently ignored
 * (returns 0). An existing entry for the same address is overwritten in
 * place. When @new_key is set and the key is an LTK, userspace is told
 * via mgmt_new_ltk(). Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1292
55ed8ca1
JH
1293int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1294{
1295 struct link_key *key;
1296
1297 key = hci_find_link_key(hdev, bdaddr);
1298 if (!key)
1299 return -ENOENT;
1300
6ed93dc6 1301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1302
1303 list_del(&key->list);
1304 kfree(key);
1305
1306 return 0;
1307}
1308
b899efaf
VCG
1309int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1310{
1311 struct smp_ltk *k, *tmp;
1312
1313 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1314 if (bacmp(bdaddr, &k->bdaddr))
1315 continue;
1316
6ed93dc6 1317 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1318
1319 list_del(&k->list);
1320 kfree(k);
1321 }
1322
1323 return 0;
1324}
1325
6bd32326 1326/* HCI command timer function */
bda4f23a 1327static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1328{
1329 struct hci_dev *hdev = (void *) arg;
1330
bda4f23a
AE
1331 if (hdev->sent_cmd) {
1332 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1333 u16 opcode = __le16_to_cpu(sent->opcode);
1334
1335 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1336 } else {
1337 BT_ERR("%s command tx timeout", hdev->name);
1338 }
1339
6bd32326 1340 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1341 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1342}
1343
2763eda6 1344struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1345 bdaddr_t *bdaddr)
2763eda6
SJ
1346{
1347 struct oob_data *data;
1348
1349 list_for_each_entry(data, &hdev->remote_oob_data, list)
1350 if (bacmp(bdaddr, &data->bdaddr) == 0)
1351 return data;
1352
1353 return NULL;
1354}
1355
1356int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357{
1358 struct oob_data *data;
1359
1360 data = hci_find_remote_oob_data(hdev, bdaddr);
1361 if (!data)
1362 return -ENOENT;
1363
6ed93dc6 1364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1365
1366 list_del(&data->list);
1367 kfree(data);
1368
1369 return 0;
1370}
1371
1372int hci_remote_oob_data_clear(struct hci_dev *hdev)
1373{
1374 struct oob_data *data, *n;
1375
1376 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1377 list_del(&data->list);
1378 kfree(data);
1379 }
1380
1381 return 0;
1382}
1383
1384int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1385 u8 *randomizer)
2763eda6
SJ
1386{
1387 struct oob_data *data;
1388
1389 data = hci_find_remote_oob_data(hdev, bdaddr);
1390
1391 if (!data) {
1392 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1393 if (!data)
1394 return -ENOMEM;
1395
1396 bacpy(&data->bdaddr, bdaddr);
1397 list_add(&data->list, &hdev->remote_oob_data);
1398 }
1399
1400 memcpy(data->hash, hash, sizeof(data->hash));
1401 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1402
6ed93dc6 1403 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1404
1405 return 0;
1406}
1407
04124681 1408struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1409{
8035ded4 1410 struct bdaddr_list *b;
b2a66aad 1411
8035ded4 1412 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1413 if (bacmp(bdaddr, &b->bdaddr) == 0)
1414 return b;
b2a66aad
AJ
1415
1416 return NULL;
1417}
1418
1419int hci_blacklist_clear(struct hci_dev *hdev)
1420{
1421 struct list_head *p, *n;
1422
1423 list_for_each_safe(p, n, &hdev->blacklist) {
1424 struct bdaddr_list *b;
1425
1426 b = list_entry(p, struct bdaddr_list, list);
1427
1428 list_del(p);
1429 kfree(b);
1430 }
1431
1432 return 0;
1433}
1434
88c1fe4b 1435int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1436{
1437 struct bdaddr_list *entry;
b2a66aad
AJ
1438
1439 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1440 return -EBADF;
1441
5e762444
AJ
1442 if (hci_blacklist_lookup(hdev, bdaddr))
1443 return -EEXIST;
b2a66aad
AJ
1444
1445 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1446 if (!entry)
1447 return -ENOMEM;
b2a66aad
AJ
1448
1449 bacpy(&entry->bdaddr, bdaddr);
1450
1451 list_add(&entry->list, &hdev->blacklist);
1452
88c1fe4b 1453 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1454}
1455
88c1fe4b 1456int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1457{
1458 struct bdaddr_list *entry;
b2a66aad 1459
1ec918ce 1460 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1461 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1462
1463 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1464 if (!entry)
5e762444 1465 return -ENOENT;
b2a66aad
AJ
1466
1467 list_del(&entry->list);
1468 kfree(entry);
1469
88c1fe4b 1470 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1471}
1472
7ba8b4be
AG
1473static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1474{
1475 struct le_scan_params *param = (struct le_scan_params *) opt;
1476 struct hci_cp_le_set_scan_param cp;
1477
1478 memset(&cp, 0, sizeof(cp));
1479 cp.type = param->type;
1480 cp.interval = cpu_to_le16(param->interval);
1481 cp.window = cpu_to_le16(param->window);
1482
1483 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1484}
1485
1486static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1487{
1488 struct hci_cp_le_set_scan_enable cp;
1489
1490 memset(&cp, 0, sizeof(cp));
1491 cp.enable = 1;
0431a43c 1492 cp.filter_dup = 1;
7ba8b4be
AG
1493
1494 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1495}
1496
/* Synchronously start an LE scan: set parameters, enable scanning, and
 * schedule automatic disable after @timeout ms.
 *
 * Runs under the request lock; each HCI request gets a 3 s completion
 * budget. Returns -EINPROGRESS when a scan is already active, a
 * negative error from __hci_request, or 0 on success. Sleeps, so it
 * must be called from process context (it runs from le_scan_work).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* &param stays valid across __hci_request since it blocks */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the auto-stop; cancelled by hci_cancel_le_scan() */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1530
7dbfac1d
AG
/* Abort a running LE scan before its timeout expires.
 *
 * Returns -EALREADY when no scan is active. The disable command is
 * only sent if we managed to cancel the pending le_scan_disable work;
 * otherwise that work is already running and will disable the scan
 * itself.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1548
7ba8b4be
AG
/* Delayed work armed by hci_do_le_scan(): stop the LE scan once the
 * requested scan duration has elapsed. An all-zero command payload
 * means "scanning disabled".
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1561
28b75a89
AG
1562static void le_scan_work(struct work_struct *work)
1563{
1564 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1565 struct le_scan_params *param = &hdev->le_scan_params;
1566
1567 BT_DBG("%s", hdev->name);
1568
04124681
GP
1569 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1570 param->timeout);
28b75a89
AG
1571}
1572
/* Kick off an asynchronous LE scan.
 *
 * Stores the parameters in hdev->le_scan_params and queues le_scan_work
 * on system_long_wq (the actual setup blocks, so it cannot run here).
 * Returns -ENOTSUPP in LE peripheral mode, -EINPROGRESS when the scan
 * work is already queued or running, 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1595
9be0dab7
DH
/* Alloc HCI device */
/* Allocate and pre-initialize a struct hci_dev: BR/EDR defaults,
 * locks, lists, work items, skb queues and the command timer.
 * Returns NULL on allocation failure. Pair with hci_register_dev()
 * and release with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types every controller must support */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff interval bounds in baseband slots (0.625 ms units --
	 * TODO confirm) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; armed when commands are sent */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1649
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). The struct itself is
 * freed by the device core's release callback once the last reference
 * is dropped.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1659
1da177e4
LT
/* Register HCI device */
/* Make a hci_alloc_dev()-initialized device visible to the stack:
 * pick an index from the IDA, link it into hci_dev_list, create its
 * workqueue, sysfs entries and rfkill switch, then schedule power-on.
 * Returns the new device id (>= 0) or a negative errno; on failure all
 * partially-acquired resources are unwound via the goto chain.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high priority: HCI traffic must not be
	 * starved or reordered */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1738
/* Unregister HCI device */
/* Tear down a registered device: unlink it, close it, flush reassembly
 * buffers, notify mgmt and the stack, drop sysfs/rfkill/workqueue, and
 * finally purge all stored keys and OOB data before releasing the
 * registration reference and the index. The teardown order mirrors the
 * acquisition order in hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stops re-arming paths from touching the dying device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1794
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1802
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1810
76bca880
MH
/* Receive frame from HCI drivers */
/* Entry point for complete frames coming up from a transport driver.
 * Takes ownership of @skb (freed on error). Frames are only accepted
 * while the device is up or initializing; accepted frames are stamped
 * and queued for hci_rx_work on the device workqueue.
 * Returns 0 on success, -ENXIO when the device cannot accept frames.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1833
/* Reassemble a (possibly partial) HCI packet from a byte stream.
 *
 * State lives in hdev->reassembly[@index]: a partially filled skb whose
 * control block tracks how many bytes are still expected. The first
 * call for a packet allocates an skb sized for the worst case of @type;
 * once the packet header is complete, ->expect is narrowed to the exact
 * payload length. A completed frame is handed to hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for a
 * bad type/index, or -ENOMEM on allocation failure or when the
 * advertised payload would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New packet: size the buffer for this type's maximum
		 * and expect the fixed header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than is still expected for this packet */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the real payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1942
ef222013
MH
/* Feed driver-provided fragments of a known packet @type into the
 * reassembler until the input is consumed. Reassembly slot `type - 1`
 * keeps concurrent packet types separate. Returns the last reassembly
 * result (0 when all input was consumed) or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Skip what the reassembler consumed this round */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1962
99811510
SS
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (e.g. from a UART transport) into the
 * reassembler. Unlike hci_recv_fragment(), the packet type is read
 * from the first byte of each frame; a dedicated reassembly slot holds
 * the in-progress packet between calls. Returns the last reassembly
 * result or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1997
1da177e4
LT
1998/* ---- Interface to upper protocols ---- */
1999
1da177e4
LT
2000int hci_register_cb(struct hci_cb *cb)
2001{
2002 BT_DBG("%p name %s", cb, cb->name);
2003
f20d09d5 2004 write_lock(&hci_cb_list_lock);
1da177e4 2005 list_add(&cb->list, &hci_cb_list);
f20d09d5 2006 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2007
2008 return 0;
2009}
2010EXPORT_SYMBOL(hci_register_cb);
2011
2012int hci_unregister_cb(struct hci_cb *cb)
2013{
2014 BT_DBG("%p name %s", cb, cb->name);
2015
f20d09d5 2016 write_lock(&hci_cb_list_lock);
1da177e4 2017 list_del(&cb->list);
f20d09d5 2018 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2019
2020 return 0;
2021}
2022EXPORT_SYMBOL(hci_unregister_cb);
2023
/* Push one outgoing frame to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel and -- in
 * promiscuous mode -- to raw sockets, then hands ownership to the
 * driver's ->send(). Returns the driver's result, or -ENODEV when the
 * skb carries no device pointer.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2051
/* Send HCI command */
/* Build an HCI command packet for @opcode with @plen bytes of @param
 * and queue it on the command queue; hci_cmd_work sends it when the
 * controller's command window allows. During init the opcode is also
 * recorded in init_last_cmd. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2087
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * or NULL when no command is outstanding or its opcode differs from
 * @opcode. The pointer aliases hdev->sent_cmd's data -- do not hold it
 * past the command's lifetime.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2105
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary/broadcast flags and
 * the payload length) in front of the skb's current data.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2118
/* Add ACL headers and queue an outgoing ACL skb (plus any fragments in
 * its frag_list) onto @queue.
 *
 * For AMP controllers the channel handle is used in the header instead
 * of the connection handle. Fragments after the first are re-marked
 * ACL_CONT and all of them are appended under the queue lock so the
 * fragment train cannot be interleaved with other senders.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Head skb carries only its linear part; frag_list holds the rest */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry the START flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2177
/* Queue an outgoing ACL skb on @chan's data queue and kick the TX
 * worker, which schedules it against other channels and connections.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2190
/* Send SCO data */
/* Prepend a SCO header to @skb, queue it on the connection's data
 * queue and kick the TX worker. SCO payloads are never fragmented
 * here, so the header is written in place.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2212
2213/* ---- HCI TX task (outgoing data) ---- */
2214
/* HCI Connection scheduler */
/* Pick the next connection of @type to transmit on: among connected
 * links with pending data, choose the one with the fewest in-flight
 * packets (a simple fairness heuristic). On success *quote is set to
 * this connection's share of the free controller buffers (at least 1);
 * when nothing is eligible it is set to 0 and NULL is returned.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen -- stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer count depends on the link type; LE falls
		 * back to the ACL pool when no dedicated LE buffers
		 * exist */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2275
/* Link TX timeout: the controller stopped acknowledging sent packets.
 * Disconnect every connection of @type that still has packets in
 * flight so their buffer credits can be reclaimed.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2296
6039aa73
GP
2297static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2298 int *quote)
1da177e4 2299{
73d80deb
LAD
2300 struct hci_conn_hash *h = &hdev->conn_hash;
2301 struct hci_chan *chan = NULL;
abc5de8f 2302 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2303 struct hci_conn *conn;
73d80deb
LAD
2304 int cnt, q, conn_num = 0;
2305
2306 BT_DBG("%s", hdev->name);
2307
bf4c6325
GP
2308 rcu_read_lock();
2309
2310 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2311 struct hci_chan *tmp;
2312
2313 if (conn->type != type)
2314 continue;
2315
2316 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2317 continue;
2318
2319 conn_num++;
2320
8192edef 2321 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2322 struct sk_buff *skb;
2323
2324 if (skb_queue_empty(&tmp->data_q))
2325 continue;
2326
2327 skb = skb_peek(&tmp->data_q);
2328 if (skb->priority < cur_prio)
2329 continue;
2330
2331 if (skb->priority > cur_prio) {
2332 num = 0;
2333 min = ~0;
2334 cur_prio = skb->priority;
2335 }
2336
2337 num++;
2338
2339 if (conn->sent < min) {
2340 min = conn->sent;
2341 chan = tmp;
2342 }
2343 }
2344
2345 if (hci_conn_num(hdev, type) == conn_num)
2346 break;
2347 }
2348
bf4c6325
GP
2349 rcu_read_unlock();
2350
73d80deb
LAD
2351 if (!chan)
2352 return NULL;
2353
2354 switch (chan->conn->type) {
2355 case ACL_LINK:
2356 cnt = hdev->acl_cnt;
2357 break;
bd1eb66b
AE
2358 case AMP_LINK:
2359 cnt = hdev->block_cnt;
2360 break;
73d80deb
LAD
2361 case SCO_LINK:
2362 case ESCO_LINK:
2363 cnt = hdev->sco_cnt;
2364 break;
2365 case LE_LINK:
2366 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2367 break;
2368 default:
2369 cnt = 0;
2370 BT_ERR("Unknown link type");
2371 }
2372
2373 q = cnt / num;
2374 *quote = q ? q : 1;
2375 BT_DBG("chan %p quote %d", chan, *quote);
2376 return chan;
2377}
2378
02b20f0b
LAD
2379static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2380{
2381 struct hci_conn_hash *h = &hdev->conn_hash;
2382 struct hci_conn *conn;
2383 int num = 0;
2384
2385 BT_DBG("%s", hdev->name);
2386
bf4c6325
GP
2387 rcu_read_lock();
2388
2389 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2390 struct hci_chan *chan;
2391
2392 if (conn->type != type)
2393 continue;
2394
2395 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2396 continue;
2397
2398 num++;
2399
8192edef 2400 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2401 struct sk_buff *skb;
2402
2403 if (chan->sent) {
2404 chan->sent = 0;
2405 continue;
2406 }
2407
2408 if (skb_queue_empty(&chan->data_q))
2409 continue;
2410
2411 skb = skb_peek(&chan->data_q);
2412 if (skb->priority >= HCI_PRIO_MAX - 1)
2413 continue;
2414
2415 skb->priority = HCI_PRIO_MAX - 1;
2416
2417 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2418 skb->priority);
02b20f0b
LAD
2419 }
2420
2421 if (hci_conn_num(hdev, type) == num)
2422 break;
2423 }
bf4c6325
GP
2424
2425 rcu_read_unlock();
2426
02b20f0b
LAD
2427}
2428
b71d385a
AE
2429static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2430{
2431 /* Calculate count of blocks used by this packet */
2432 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2433}
2434
6039aa73 2435static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2436{
1da177e4
LT
2437 if (!test_bit(HCI_RAW, &hdev->flags)) {
2438 /* ACL tx timeout must be longer than maximum
2439 * link supervision timeout (40.9 seconds) */
63d2bc1b 2440 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2441 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2442 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2443 }
63d2bc1b 2444}
1da177e4 2445
6039aa73 2446static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2447{
2448 unsigned int cnt = hdev->acl_cnt;
2449 struct hci_chan *chan;
2450 struct sk_buff *skb;
2451 int quote;
2452
2453 __check_timeout(hdev, cnt);
04837f64 2454
73d80deb 2455 while (hdev->acl_cnt &&
a8c5fb1a 2456 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2457 u32 priority = (skb_peek(&chan->data_q))->priority;
2458 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2459 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2460 skb->len, skb->priority);
73d80deb 2461
ec1cce24
LAD
2462 /* Stop if priority has changed */
2463 if (skb->priority < priority)
2464 break;
2465
2466 skb = skb_dequeue(&chan->data_q);
2467
73d80deb 2468 hci_conn_enter_active_mode(chan->conn,
04124681 2469 bt_cb(skb)->force_active);
04837f64 2470
1da177e4
LT
2471 hci_send_frame(skb);
2472 hdev->acl_last_tx = jiffies;
2473
2474 hdev->acl_cnt--;
73d80deb
LAD
2475 chan->sent++;
2476 chan->conn->sent++;
1da177e4
LT
2477 }
2478 }
02b20f0b
LAD
2479
2480 if (cnt != hdev->acl_cnt)
2481 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2482}
2483
6039aa73 2484static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2485{
63d2bc1b 2486 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2487 struct hci_chan *chan;
2488 struct sk_buff *skb;
2489 int quote;
bd1eb66b 2490 u8 type;
b71d385a 2491
63d2bc1b 2492 __check_timeout(hdev, cnt);
b71d385a 2493
bd1eb66b
AE
2494 BT_DBG("%s", hdev->name);
2495
2496 if (hdev->dev_type == HCI_AMP)
2497 type = AMP_LINK;
2498 else
2499 type = ACL_LINK;
2500
b71d385a 2501 while (hdev->block_cnt > 0 &&
bd1eb66b 2502 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2503 u32 priority = (skb_peek(&chan->data_q))->priority;
2504 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2505 int blocks;
2506
2507 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2508 skb->len, skb->priority);
b71d385a
AE
2509
2510 /* Stop if priority has changed */
2511 if (skb->priority < priority)
2512 break;
2513
2514 skb = skb_dequeue(&chan->data_q);
2515
2516 blocks = __get_blocks(hdev, skb);
2517 if (blocks > hdev->block_cnt)
2518 return;
2519
2520 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2521 bt_cb(skb)->force_active);
b71d385a
AE
2522
2523 hci_send_frame(skb);
2524 hdev->acl_last_tx = jiffies;
2525
2526 hdev->block_cnt -= blocks;
2527 quote -= blocks;
2528
2529 chan->sent += blocks;
2530 chan->conn->sent += blocks;
2531 }
2532 }
2533
2534 if (cnt != hdev->block_cnt)
bd1eb66b 2535 hci_prio_recalculate(hdev, type);
b71d385a
AE
2536}
2537
6039aa73 2538static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2539{
2540 BT_DBG("%s", hdev->name);
2541
bd1eb66b
AE
2542 /* No ACL link over BR/EDR controller */
2543 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2544 return;
2545
2546 /* No AMP link over AMP controller */
2547 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
2548 return;
2549
2550 switch (hdev->flow_ctl_mode) {
2551 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2552 hci_sched_acl_pkt(hdev);
2553 break;
2554
2555 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2556 hci_sched_acl_blk(hdev);
2557 break;
2558 }
2559}
2560
1da177e4 2561/* Schedule SCO */
6039aa73 2562static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2563{
2564 struct hci_conn *conn;
2565 struct sk_buff *skb;
2566 int quote;
2567
2568 BT_DBG("%s", hdev->name);
2569
52087a79
LAD
2570 if (!hci_conn_num(hdev, SCO_LINK))
2571 return;
2572
1da177e4
LT
2573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2574 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2575 BT_DBG("skb %p len %d", skb, skb->len);
2576 hci_send_frame(skb);
2577
2578 conn->sent++;
2579 if (conn->sent == ~0)
2580 conn->sent = 0;
2581 }
2582 }
2583}
2584
6039aa73 2585static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2586{
2587 struct hci_conn *conn;
2588 struct sk_buff *skb;
2589 int quote;
2590
2591 BT_DBG("%s", hdev->name);
2592
52087a79
LAD
2593 if (!hci_conn_num(hdev, ESCO_LINK))
2594 return;
2595
8fc9ced3
GP
2596 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2597 &quote))) {
b6a0dc82
MH
2598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599 BT_DBG("skb %p len %d", skb, skb->len);
2600 hci_send_frame(skb);
2601
2602 conn->sent++;
2603 if (conn->sent == ~0)
2604 conn->sent = 0;
2605 }
2606 }
2607}
2608
6039aa73 2609static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 2610{
73d80deb 2611 struct hci_chan *chan;
6ed58ec5 2612 struct sk_buff *skb;
02b20f0b 2613 int quote, cnt, tmp;
6ed58ec5
VT
2614
2615 BT_DBG("%s", hdev->name);
2616
52087a79
LAD
2617 if (!hci_conn_num(hdev, LE_LINK))
2618 return;
2619
6ed58ec5
VT
2620 if (!test_bit(HCI_RAW, &hdev->flags)) {
2621 /* LE tx timeout must be longer than maximum
2622 * link supervision timeout (40.9 seconds) */
bae1f5d9 2623 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 2624 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2625 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2626 }
2627
2628 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2629 tmp = cnt;
73d80deb 2630 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2631 u32 priority = (skb_peek(&chan->data_q))->priority;
2632 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2633 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2634 skb->len, skb->priority);
6ed58ec5 2635
ec1cce24
LAD
2636 /* Stop if priority has changed */
2637 if (skb->priority < priority)
2638 break;
2639
2640 skb = skb_dequeue(&chan->data_q);
2641
6ed58ec5
VT
2642 hci_send_frame(skb);
2643 hdev->le_last_tx = jiffies;
2644
2645 cnt--;
73d80deb
LAD
2646 chan->sent++;
2647 chan->conn->sent++;
6ed58ec5
VT
2648 }
2649 }
73d80deb 2650
6ed58ec5
VT
2651 if (hdev->le_pkts)
2652 hdev->le_cnt = cnt;
2653 else
2654 hdev->acl_cnt = cnt;
02b20f0b
LAD
2655
2656 if (cnt != tmp)
2657 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2658}
2659
3eff45ea 2660static void hci_tx_work(struct work_struct *work)
1da177e4 2661{
3eff45ea 2662 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2663 struct sk_buff *skb;
2664
6ed58ec5 2665 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2666 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2667
2668 /* Schedule queues and send stuff to HCI driver */
2669
2670 hci_sched_acl(hdev);
2671
2672 hci_sched_sco(hdev);
2673
b6a0dc82
MH
2674 hci_sched_esco(hdev);
2675
6ed58ec5
VT
2676 hci_sched_le(hdev);
2677
1da177e4
LT
2678 /* Send next queued raw (unknown type) packet */
2679 while ((skb = skb_dequeue(&hdev->raw_q)))
2680 hci_send_frame(skb);
1da177e4
LT
2681}
2682
25985edc 2683/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2684
2685/* ACL data packet */
6039aa73 2686static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2687{
2688 struct hci_acl_hdr *hdr = (void *) skb->data;
2689 struct hci_conn *conn;
2690 __u16 handle, flags;
2691
2692 skb_pull(skb, HCI_ACL_HDR_SIZE);
2693
2694 handle = __le16_to_cpu(hdr->handle);
2695 flags = hci_flags(handle);
2696 handle = hci_handle(handle);
2697
f0e09510 2698 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 2699 handle, flags);
1da177e4
LT
2700
2701 hdev->stat.acl_rx++;
2702
2703 hci_dev_lock(hdev);
2704 conn = hci_conn_hash_lookup_handle(hdev, handle);
2705 hci_dev_unlock(hdev);
8e87d142 2706
1da177e4 2707 if (conn) {
65983fc7 2708 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2709
671267bf
JH
2710 hci_dev_lock(hdev);
2711 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2712 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2713 mgmt_device_connected(hdev, &conn->dst, conn->type,
2714 conn->dst_type, 0, NULL, 0,
2715 conn->dev_class);
2716 hci_dev_unlock(hdev);
2717
1da177e4 2718 /* Send to upper protocol */
686ebf28
UF
2719 l2cap_recv_acldata(conn, skb, flags);
2720 return;
1da177e4 2721 } else {
8e87d142 2722 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2723 hdev->name, handle);
1da177e4
LT
2724 }
2725
2726 kfree_skb(skb);
2727}
2728
2729/* SCO data packet */
6039aa73 2730static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2731{
2732 struct hci_sco_hdr *hdr = (void *) skb->data;
2733 struct hci_conn *conn;
2734 __u16 handle;
2735
2736 skb_pull(skb, HCI_SCO_HDR_SIZE);
2737
2738 handle = __le16_to_cpu(hdr->handle);
2739
f0e09510 2740 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2741
2742 hdev->stat.sco_rx++;
2743
2744 hci_dev_lock(hdev);
2745 conn = hci_conn_hash_lookup_handle(hdev, handle);
2746 hci_dev_unlock(hdev);
2747
2748 if (conn) {
1da177e4 2749 /* Send to upper protocol */
686ebf28
UF
2750 sco_recv_scodata(conn, skb);
2751 return;
1da177e4 2752 } else {
8e87d142 2753 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2754 hdev->name, handle);
1da177e4
LT
2755 }
2756
2757 kfree_skb(skb);
2758}
2759
b78752cc 2760static void hci_rx_work(struct work_struct *work)
1da177e4 2761{
b78752cc 2762 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2763 struct sk_buff *skb;
2764
2765 BT_DBG("%s", hdev->name);
2766
1da177e4 2767 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2768 /* Send copy to monitor */
2769 hci_send_to_monitor(hdev, skb);
2770
1da177e4
LT
2771 if (atomic_read(&hdev->promisc)) {
2772 /* Send copy to the sockets */
470fe1b5 2773 hci_send_to_sock(hdev, skb);
1da177e4
LT
2774 }
2775
2776 if (test_bit(HCI_RAW, &hdev->flags)) {
2777 kfree_skb(skb);
2778 continue;
2779 }
2780
2781 if (test_bit(HCI_INIT, &hdev->flags)) {
2782 /* Don't process data packets in this states. */
0d48d939 2783 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2784 case HCI_ACLDATA_PKT:
2785 case HCI_SCODATA_PKT:
2786 kfree_skb(skb);
2787 continue;
3ff50b79 2788 }
1da177e4
LT
2789 }
2790
2791 /* Process frame */
0d48d939 2792 switch (bt_cb(skb)->pkt_type) {
1da177e4 2793 case HCI_EVENT_PKT:
b78752cc 2794 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2795 hci_event_packet(hdev, skb);
2796 break;
2797
2798 case HCI_ACLDATA_PKT:
2799 BT_DBG("%s ACL data packet", hdev->name);
2800 hci_acldata_packet(hdev, skb);
2801 break;
2802
2803 case HCI_SCODATA_PKT:
2804 BT_DBG("%s SCO data packet", hdev->name);
2805 hci_scodata_packet(hdev, skb);
2806 break;
2807
2808 default:
2809 kfree_skb(skb);
2810 break;
2811 }
2812 }
1da177e4
LT
2813}
2814
c347b765 2815static void hci_cmd_work(struct work_struct *work)
1da177e4 2816{
c347b765 2817 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2818 struct sk_buff *skb;
2819
2104786b
AE
2820 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2821 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 2822
1da177e4 2823 /* Send queued commands */
5a08ecce
AE
2824 if (atomic_read(&hdev->cmd_cnt)) {
2825 skb = skb_dequeue(&hdev->cmd_q);
2826 if (!skb)
2827 return;
2828
7585b97a 2829 kfree_skb(hdev->sent_cmd);
1da177e4 2830
70f23020
AE
2831 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2832 if (hdev->sent_cmd) {
1da177e4
LT
2833 atomic_dec(&hdev->cmd_cnt);
2834 hci_send_frame(skb);
7bdb8a5c
SJ
2835 if (test_bit(HCI_RESET, &hdev->flags))
2836 del_timer(&hdev->cmd_timer);
2837 else
2838 mod_timer(&hdev->cmd_timer,
5f246e89 2839 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
2840 } else {
2841 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2842 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2843 }
2844 }
2845}
2519a1fc
AG
2846
2847int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2848{
2849 /* General inquiry access code (GIAC) */
2850 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2851 struct hci_cp_inquiry cp;
2852
2853 BT_DBG("%s", hdev->name);
2854
2855 if (test_bit(HCI_INQUIRY, &hdev->flags))
2856 return -EINPROGRESS;
2857
4663262c
JH
2858 inquiry_cache_flush(hdev);
2859
2519a1fc
AG
2860 memset(&cp, 0, sizeof(cp));
2861 memcpy(&cp.lap, lap, sizeof(cp.lap));
2862 cp.length = length;
2863
2864 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2865}
023d5049
AG
2866
2867int hci_cancel_inquiry(struct hci_dev *hdev)
2868{
2869 BT_DBG("%s", hdev->name);
2870
2871 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2872 return -EALREADY;
023d5049
AG
2873
2874 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2875}
31f7956c
AG
2876
2877u8 bdaddr_to_le(u8 bdaddr_type)
2878{
2879 switch (bdaddr_type) {
2880 case BDADDR_LE_PUBLIC:
2881 return ADDR_LE_DEV_PUBLIC;
2882
2883 default:
2884 /* Fallback to LE Random address type */
2885 return ADDR_LE_DEV_RANDOM;
2886 }
2887}